author    Namhyung Kim <namhyung@kernel.org>  2024-11-03 23:16:30 -0800
committer Namhyung Kim <namhyung@kernel.org>  2024-11-03 23:18:20 -0800
commit    aa5c90601b531323f82ceb02b41a66974153b76f (patch)
tree      80ca2bed534b3b356bffa60eab11980c3f9a3400 /tools
parent    d36e5b36a2928b30e09ff59ce5ce2d5df935176e (diff)
parent    59b723cd2adbac2a34fc8e12c74ae26ae45bf230 (diff)
Merge 'origin/master' into perf-tools-next
To get the fixes in the perf-tools branch. Resolved a conflict due to RISC-V's syscall table change.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Diffstat (limited to 'tools')
-rw-r--r-- tools/arch/arm64/include/asm/cputype.h | 2
-rw-r--r-- tools/arch/x86/include/asm/msr-index.h | 34
-rw-r--r-- tools/arch/x86/include/uapi/asm/kvm.h | 1
-rw-r--r-- tools/arch/x86/include/uapi/asm/unistd_32.h | 3
-rw-r--r-- tools/arch/x86/include/uapi/asm/unistd_64.h | 3
-rw-r--r-- tools/include/linux/bits.h | 15
-rw-r--r-- tools/include/linux/unaligned.h | 11
-rw-r--r-- tools/include/uapi/linux/bits.h | 3
-rw-r--r-- tools/include/uapi/linux/bpf.h | 25
-rw-r--r-- tools/include/uapi/linux/const.h | 17
-rw-r--r-- tools/include/vdso/unaligned.h | 15
-rw-r--r-- tools/mm/page-types.c | 9
-rw-r--r-- tools/mm/slabinfo.c | 4
-rw-r--r-- tools/perf/Makefile.config | 4
-rw-r--r-- tools/perf/builtin-trace.c | 2
-rwxr-xr-x tools/perf/check-headers.sh | 1
-rwxr-xr-x tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh | 69
-rw-r--r-- tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c | 22
-rw-r--r-- tools/perf/util/cap.c | 10
-rw-r--r-- tools/perf/util/python.c | 3
-rw-r--r-- tools/perf/util/syscalltbl.c | 10
-rw-r--r-- tools/sched_ext/include/scx/common.bpf.h | 2
-rw-r--r-- tools/testing/cxl/test/cxl.c | 200
-rw-r--r-- tools/testing/cxl/test/mem.c | 1
-rw-r--r-- tools/testing/radix-tree/maple.c | 110
-rw-r--r-- tools/testing/selftests/Makefile | 9
-rw-r--r-- tools/testing/selftests/bpf/Makefile | 22
-rw-r--r-- tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile | 19
-rw-r--r-- tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c | 39
-rw-r--r-- tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile | 19
-rw-r--r-- tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c | 39
-rw-r--r-- tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c | 109
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 27
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c | 2
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/cpumask.c | 1
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/fill_link_info.c | 18
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/kfunc_module_order.c | 55
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c | 42
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/verifier.c | 21
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c | 125
-rw-r--r-- tools/testing/selftests/bpf/progs/cpumask_common.h | 5
-rw-r--r-- tools/testing/selftests/bpf/progs/cpumask_failure.c | 35
-rw-r--r-- tools/testing/selftests/bpf/progs/cpumask_success.c | 78
-rw-r--r-- tools/testing/selftests/bpf/progs/kfunc_module_order.c | 30
-rw-r--r-- tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c | 2
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_bits_iter.c | 61
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c | 55
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_const.c | 31
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_linked_scalars.c | 34
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_movsx.c | 40
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_mtu.c | 18
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_scalar_ids.c | 67
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_search_pruning.c | 23
-rw-r--r-- tools/testing/selftests/bpf/testing_helpers.c | 34
-rw-r--r-- tools/testing/selftests/bpf/testing_helpers.h | 2
-rw-r--r-- tools/testing/selftests/bpf/veristat.cfg | 1
-rw-r--r-- tools/testing/selftests/hid/Makefile | 1
-rwxr-xr-x tools/testing/selftests/intel_pstate/run.sh | 9
-rw-r--r-- tools/testing/selftests/kvm/Makefile | 3
-rw-r--r-- tools/testing/selftests/kvm/aarch64/set_id_regs.c | 16
-rw-r--r-- tools/testing/selftests/kvm/x86_64/cpuid_test.c | 2
-rw-r--r-- tools/testing/selftests/mm/khugepaged.c | 2
-rw-r--r-- tools/testing/selftests/mm/uffd-unit-tests.c | 5
-rw-r--r-- tools/testing/selftests/mount_setattr/mount_setattr_test.c | 9
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_flat.sh | 14
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh | 14
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh | 14
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_hier.sh | 14
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh | 14
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh | 14
-rw-r--r-- tools/testing/selftests/net/forwarding/ip6gre_lib.sh | 80
-rw-r--r-- tools/testing/selftests/net/lib/py/nsim.py | 1
-rwxr-xr-x tools/testing/selftests/net/mptcp/mptcp_connect.sh | 9
-rwxr-xr-x tools/testing/selftests/net/mptcp/mptcp_join.sh | 115
-rw-r--r-- tools/testing/selftests/net/netfilter/conntrack_dump_flush.c | 6
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_flowtable.sh | 39
-rwxr-xr-x tools/testing/selftests/net/rds/test.py | 5
-rw-r--r-- tools/testing/selftests/sched_ext/Makefile | 73
-rw-r--r-- tools/testing/selftests/sched_ext/create_dsq.bpf.c | 6
-rw-r--r-- tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c | 4
-rw-r--r-- tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c | 4
-rw-r--r-- tools/testing/selftests/sched_ext/dsp_local_on.bpf.c | 8
-rw-r--r-- tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c | 8
-rw-r--r-- tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c | 10
-rw-r--r-- tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c | 4
-rw-r--r-- tools/testing/selftests/sched_ext/exit.bpf.c | 22
-rw-r--r-- tools/testing/selftests/sched_ext/hotplug.bpf.c | 8
-rw-r--r-- tools/testing/selftests/sched_ext/init_enable_count.bpf.c | 8
-rw-r--r-- tools/testing/selftests/sched_ext/maximal.bpf.c | 58
-rw-r--r-- tools/testing/selftests/sched_ext/maybe_null.bpf.c | 6
-rw-r--r-- tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c | 4
-rw-r--r-- tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c | 4
-rw-r--r-- tools/testing/selftests/sched_ext/prog_run.bpf.c | 2
-rw-r--r-- tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c | 2
-rw-r--r-- tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c | 6
-rw-r--r-- tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c | 2
-rw-r--r-- tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c | 4
-rw-r--r-- tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c | 4
-rw-r--r-- tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c | 12
-rw-r--r-- tools/testing/selftests/watchdog/watchdog-test.c | 6
-rw-r--r-- tools/testing/vma/vma.c | 40
-rw-r--r-- tools/usb/usbip/src/usbip_detach.c | 1
102 files changed, 1886 insertions, 445 deletions
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
index 5a7dfeb8e8eb..488f8e751349 100644
--- a/tools/arch/arm64/include/asm/cputype.h
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -94,6 +94,7 @@
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
+#define ARM_CPU_PART_NEOVERSE_N3 0xD8E
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -176,6 +177,7 @@
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
+#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index a7c06a46fb76..3ae84c3b8e6d 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -36,6 +36,20 @@
#define EFER_FFXSR (1<<_EFER_FFXSR)
#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
+/*
+ * Architectural memory types that are common to MTRRs, PAT, VMX MSRs, etc.
+ * Most MSRs support/allow only a subset of memory types, but the values
+ * themselves are common across all relevant MSRs.
+ */
+#define X86_MEMTYPE_UC 0ull /* Uncacheable, a.k.a. Strong Uncacheable */
+#define X86_MEMTYPE_WC 1ull /* Write Combining */
+/* RESERVED 2 */
+/* RESERVED 3 */
+#define X86_MEMTYPE_WT 4ull /* Write Through */
+#define X86_MEMTYPE_WP 5ull /* Write Protected */
+#define X86_MEMTYPE_WB 6ull /* Write Back */
+#define X86_MEMTYPE_UC_MINUS 7ull /* Weak Uncacheable (PAT only) */
+
/* FRED MSRs */
#define MSR_IA32_FRED_RSP0 0x1cc /* Level 0 stack pointer */
#define MSR_IA32_FRED_RSP1 0x1cd /* Level 1 stack pointer */
@@ -365,6 +379,12 @@
#define MSR_IA32_CR_PAT 0x00000277
+#define PAT_VALUE(p0, p1, p2, p3, p4, p5, p6, p7) \
+ ((X86_MEMTYPE_ ## p0) | (X86_MEMTYPE_ ## p1 << 8) | \
+ (X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) | \
+ (X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) | \
+ (X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56))
+
#define MSR_IA32_DEBUGCTLMSR 0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
@@ -1159,15 +1179,6 @@
#define MSR_IA32_VMX_VMFUNC 0x00000491
#define MSR_IA32_VMX_PROCBASED_CTLS3 0x00000492
-/* VMX_BASIC bits and bitmasks */
-#define VMX_BASIC_VMCS_SIZE_SHIFT 32
-#define VMX_BASIC_TRUE_CTLS (1ULL << 55)
-#define VMX_BASIC_64 0x0001000000000000LLU
-#define VMX_BASIC_MEM_TYPE_SHIFT 50
-#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
-#define VMX_BASIC_MEM_TYPE_WB 6LLU
-#define VMX_BASIC_INOUT 0x0040000000000000LLU
-
/* Resctrl MSRs: */
/* - Intel: */
#define MSR_IA32_L3_QOS_CFG 0xc81
@@ -1185,11 +1196,6 @@
#define MSR_IA32_SMBA_BW_BASE 0xc0000280
#define MSR_IA32_EVT_CFG_BASE 0xc0000400
-/* MSR_IA32_VMX_MISC bits */
-#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
-#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
-#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
-
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
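
For context on the new macro: PAT_VALUE() packs eight X86_MEMTYPE_* encodings into the 64-bit MSR_IA32_CR_PAT layout, one byte per PAT entry (entry 0 in bits 0-7, entry 1 in bits 8-15, and so on). A minimal usage sketch; the entry choices below are illustrative only and not taken from this patch:

    /* Illustrative PAT layout: entry 0 = WB, entry 1 = WC, entry 2 = UC-, entry 3 = UC, ... */
    u64 pat = PAT_VALUE(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT);
    /* Expanding entry 1 by hand: X86_MEMTYPE_WC (1ull) shifted into bits 8-15. */
    u64 entry1 = X86_MEMTYPE_WC << 8;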
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index bf57a824f722..a8debbf2f702 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -439,6 +439,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
+#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
diff --git a/tools/arch/x86/include/uapi/asm/unistd_32.h b/tools/arch/x86/include/uapi/asm/unistd_32.h
index 9de35df1afc3..63182a023e9d 100644
--- a/tools/arch/x86/include/uapi/asm/unistd_32.h
+++ b/tools/arch/x86/include/uapi/asm/unistd_32.h
@@ -11,6 +11,9 @@
#ifndef __NR_getpgid
#define __NR_getpgid 132
#endif
+#ifndef __NR_capget
+#define __NR_capget 184
+#endif
#ifndef __NR_gettid
#define __NR_gettid 224
#endif
diff --git a/tools/arch/x86/include/uapi/asm/unistd_64.h b/tools/arch/x86/include/uapi/asm/unistd_64.h
index d0f2043d7132..77311e8d1b5d 100644
--- a/tools/arch/x86/include/uapi/asm/unistd_64.h
+++ b/tools/arch/x86/include/uapi/asm/unistd_64.h
@@ -11,6 +11,9 @@
#ifndef __NR_getpgid
#define __NR_getpgid 121
#endif
+#ifndef __NR_capget
+#define __NR_capget 125
+#endif
#ifndef __NR_gettid
#define __NR_gettid 186
#endif
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
index 0eb24d21aac2..60044b608817 100644
--- a/tools/include/linux/bits.h
+++ b/tools/include/linux/bits.h
@@ -36,4 +36,19 @@
#define GENMASK_ULL(h, l) \
(GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing asm support
+ *
+ * __GENMASK_U128() depends on _BIT128() which would not work
+ * in the asm code, as it shifts an 'unsigned __int128' data
+ * type instead of direct representation of 128 bit constants
+ * such as long and unsigned long. The fundamental problem is
+ * that a 128 bit constant will get silently truncated by the
+ * gcc compiler.
+ */
+#define GENMASK_U128(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
+#endif
+
#endif /* __LINUX_BITS_H */
diff --git a/tools/include/linux/unaligned.h b/tools/include/linux/unaligned.h
index bc0633bc4650..395a4464fe73 100644
--- a/tools/include/linux/unaligned.h
+++ b/tools/include/linux/unaligned.h
@@ -9,16 +9,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
#pragma GCC diagnostic ignored "-Wattributes"
-
-#define __get_unaligned_t(type, ptr) ({ \
- const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x; \
-})
-
-#define __put_unaligned_t(type, val, ptr) do { \
- struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x = (val); \
-} while (0)
+#include <vdso/unaligned.h>
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
diff --git a/tools/include/uapi/linux/bits.h b/tools/include/uapi/linux/bits.h
index 3c2a101986a3..5ee30f882736 100644
--- a/tools/include/uapi/linux/bits.h
+++ b/tools/include/uapi/linux/bits.h
@@ -12,4 +12,7 @@
(((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
(~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
+#define __GENMASK_U128(h, l) \
+ ((_BIT128((h)) << 1) - (_BIT128(l)))
+
#endif /* _UAPI_LINUX_BITS_H */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 1fb3cb2636e6..4a939c90dc2e 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1121,6 +1121,9 @@ enum bpf_attach_type {
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+/* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[]
+ * in sync with the definitions below.
+ */
enum bpf_link_type {
BPF_LINK_TYPE_UNSPEC = 0,
BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
@@ -5519,11 +5522,12 @@ union bpf_attr {
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
*
- * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * void *bpf_kptr_xchg(void *dst, void *ptr)
* Description
- * Exchange kptr at pointer *map_value* with *ptr*, and return the
- * old value. *ptr* can be NULL, otherwise it must be a referenced
- * pointer which will be released when this helper is called.
+ * Exchange kptr at pointer *dst* with *ptr*, and return the old value.
+ * *dst* can be map value or local kptr. *ptr* can be NULL, otherwise
+ * it must be a referenced pointer which will be released when this helper
+ * is called.
* Return
* The old value of kptr (which can be NULL). The returned pointer
* if not NULL, is a reference which must be released using its
@@ -6046,11 +6050,6 @@ enum {
BPF_F_MARK_ENFORCE = (1ULL << 6),
};
-/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
-enum {
- BPF_F_INGRESS = (1ULL << 0),
-};
-
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
enum {
BPF_F_TUNINFO_IPV6 = (1ULL << 0),
@@ -6197,10 +6196,12 @@ enum {
BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
};
-/* Flags for bpf_redirect_map helper */
+/* Flags for bpf_redirect and bpf_redirect_map helpers */
enum {
- BPF_F_BROADCAST = (1ULL << 3),
- BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
+ BPF_F_INGRESS = (1ULL << 0), /* used for skb path */
+ BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */
+ BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */
+#define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
};
#define __bpf_md_ptr(type, name) \
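
The consolidated enum above now documents the flags for both the skb and XDP redirect paths in one place. A hedged usage sketch (the helper signatures are the existing UAPI ones; the program snippets and the dev_map name are illustrative only):

    /* skb path (tc program): redirect the packet to the ingress side of ifindex */
    return bpf_redirect(ifindex, BPF_F_INGRESS);

    /* XDP path: broadcast to every entry of a hypothetical BPF_MAP_TYPE_DEVMAP map,
     * excluding the device the packet arrived on.
     */
    return bpf_redirect_map(&dev_map, 0, BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);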
diff --git a/tools/include/uapi/linux/const.h b/tools/include/uapi/linux/const.h
index a429381e7ca5..e16be0d37746 100644
--- a/tools/include/uapi/linux/const.h
+++ b/tools/include/uapi/linux/const.h
@@ -28,6 +28,23 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing asm support
+ *
+ * __BIT128() would not work in the asm code, as it shifts an
+ * 'unsigned __int128' data type as direct representation of
+ * 128 bit constants is not supported in the gcc compiler, as
+ * they get silently truncated.
+ *
+ * TODO: Please revisit this implementation when gcc compiler
+ * starts representing 128 bit constants directly like long
+ * and unsigned long etc. Subsequently drop the comment for
+ * GENMASK_U128() which would then start supporting asm code.
+ */
+#define _BIT128(x) ((unsigned __int128)(1) << (x))
+#endif
+
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
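
Taken together, _BIT128(), __GENMASK_U128() and GENMASK_U128() extend the familiar GENMASK pattern past bit 63. A small worked example (sketch, C only, since the comments above note the asm path is unsupported):

    /* GENMASK_U128(h, l) sets bits l..h of an unsigned __int128 value.
     * E.g. __GENMASK_U128(7, 4) = ((_BIT128(7) << 1) - _BIT128(4))
     *                           = 0x100 - 0x10 = 0xf0  (bits 4..7 set).
     */
    unsigned __int128 mask = GENMASK_U128(71, 8);   /* bits 8..71 set, spanning the 64-bit boundary */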
diff --git a/tools/include/vdso/unaligned.h b/tools/include/vdso/unaligned.h
new file mode 100644
index 000000000000..eee3d2a4dbe4
--- /dev/null
+++ b/tools/include/vdso/unaligned.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_UNALIGNED_H
+#define __VDSO_UNALIGNED_H
+
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#endif /* __VDSO_UNALIGNED_H */
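
The helpers keep their previous semantics after the move into vdso/unaligned.h; tools/include/linux/unaligned.h still provides the get_unaligned()/put_unaligned() wrappers. A short usage sketch (hypothetical buffer, u32 assumed from linux/types.h):

    unsigned char buf[8] = {};
    put_unaligned(0x11223344u, (u32 *)(buf + 1));   /* store a u32 at an odd address without faulting */
    u32 v = get_unaligned((u32 *)(buf + 1));        /* read it back; v == 0x11223344 */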
diff --git a/tools/mm/page-types.c b/tools/mm/page-types.c
index fa050d5a48cd..6eb17cc1a06c 100644
--- a/tools/mm/page-types.c
+++ b/tools/mm/page-types.c
@@ -22,6 +22,7 @@
#include <time.h>
#include <setjmp.h>
#include <signal.h>
+#include <inttypes.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
@@ -391,9 +392,9 @@ static void show_page_range(unsigned long voffset, unsigned long offset,
if (opt_file)
printf("%lx\t", voff);
if (opt_list_cgroup)
- printf("@%llu\t", (unsigned long long)cgroup0);
+ printf("@%" PRIu64 "\t", cgroup0);
if (opt_list_mapcnt)
- printf("%lu\t", mapcnt0);
+ printf("%" PRIu64 "\t", mapcnt0);
printf("%lx\t%lx\t%s\n",
index, count, page_flag_name(flags0));
}
@@ -419,9 +420,9 @@ static void show_page(unsigned long voffset, unsigned long offset,
if (opt_file)
printf("%lx\t", voffset);
if (opt_list_cgroup)
- printf("@%llu\t", (unsigned long long)cgroup);
+ printf("@%" PRIu64 "\t", cgroup)
if (opt_list_mapcnt)
- printf("%lu\t", mapcnt);
+ printf("%" PRIu64 "\t", mapcnt);
printf("%lx\t%s\n", offset, page_flag_name(flags));
}
diff --git a/tools/mm/slabinfo.c b/tools/mm/slabinfo.c
index cfaeaea71042..04e9e6ba86ea 100644
--- a/tools/mm/slabinfo.c
+++ b/tools/mm/slabinfo.c
@@ -1297,7 +1297,9 @@ static void read_slab_dir(void)
slab->cpu_partial_free = get_obj("cpu_partial_free");
slab->alloc_node_mismatch = get_obj("alloc_node_mismatch");
slab->deactivate_bypass = get_obj("deactivate_bypass");
- chdir("..");
+ if (chdir(".."))
+ fatal("Unable to chdir from slab ../%s\n",
+ slab->name);
if (slab->name[0] == ':')
alias_targets++;
slab++;
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 854f84e80256..8da6d91b7ad5 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -691,8 +691,8 @@ ifeq ($(BUILD_BPF_SKEL),1)
BUILD_BPF_SKEL := 0
else
CLANG_VERSION := $(shell $(CLANG) --version | head -1 | sed 's/.*clang version \([[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+\).*/\1/g')
- ifeq ($(call version-lt3,$(CLANG_VERSION),16.0.6),1)
- $(warning Warning: Disabled BPF skeletons as at least $(CLANG) version 16.0.6 is reported to be a working setup with the current of BPF based perf features)
+ ifeq ($(call version-lt3,$(CLANG_VERSION),12.0.1),1)
+ $(warning Warning: Disabled BPF skeletons as reliable BTF generation needs at least $(CLANG) version 12.0.1)
BUILD_BPF_SKEL := 0
endif
endif
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 748b061f8678..c1e9e14c8686 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1399,7 +1399,7 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
{ .name = "waitid", .errpid = true,
.arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
- { .name = "write", .errpid = true,
+ { .name = "write",
.arg = { [1] = { .scnprintf = SCA_BUF /* buf */, .from_user = true, }, }, },
};
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 29adbb423327..a05c1c105c51 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -22,6 +22,7 @@ FILES=(
"include/vdso/bits.h"
"include/linux/const.h"
"include/vdso/const.h"
+ "include/vdso/unaligned.h"
"include/linux/hash.h"
"include/linux/list-sort.h"
"include/uapi/linux/hw_breakpoint.h"
diff --git a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
index b5dc10b2a738..bead723e34af 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
@@ -19,35 +19,74 @@
TEST_RESULT=0
# skip if not supported
-BLACKFUNC=`head -n 1 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
-if [ -z "$BLACKFUNC" ]; then
+BLACKFUNC_LIST=`head -n 5 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
+if [ -z "$BLACKFUNC_LIST" ]; then
print_overall_skipped
exit 0
fi
+# try to find vmlinux with DWARF debug info
+VMLINUX_FILE=$(perf probe -v random_probe |& grep "Using.*for symbols" | sed -r 's/^Using (.*) for symbols$/\1/')
+
# remove all previously added probes
clear_all_probes
### adding blacklisted function
-
-# functions from blacklist should be skipped by perf probe
-! $CMD_PERF probe $BLACKFUNC > $LOGS_DIR/adding_blacklisted.log 2> $LOGS_DIR/adding_blacklisted.err
-PERF_EXIT_CODE=$?
-
REGEX_SCOPE_FAIL="Failed to find scope of probe point"
REGEX_SKIP_MESSAGE=" is blacklisted function, skip it\."
-REGEX_NOT_FOUND_MESSAGE="Probe point \'$BLACKFUNC\' not found."
+REGEX_NOT_FOUND_MESSAGE="Probe point \'$RE_EVENT\' not found."
REGEX_ERROR_MESSAGE="Error: Failed to add events."
REGEX_INVALID_ARGUMENT="Failed to write event: Invalid argument"
REGEX_SYMBOL_FAIL="Failed to find symbol at $RE_ADDRESS"
-REGEX_OUT_SECTION="$BLACKFUNC is out of \.\w+, skip it"
-../common/check_all_lines_matched.pl "$REGEX_SKIP_MESSAGE" "$REGEX_NOT_FOUND_MESSAGE" "$REGEX_ERROR_MESSAGE" "$REGEX_SCOPE_FAIL" "$REGEX_INVALID_ARGUMENT" "$REGEX_SYMBOL_FAIL" "$REGEX_OUT_SECTION" < $LOGS_DIR/adding_blacklisted.err
-CHECK_EXIT_CODE=$?
-
-print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding blacklisted function $BLACKFUNC"
-(( TEST_RESULT += $? ))
-
+REGEX_OUT_SECTION="$RE_EVENT is out of \.\w+, skip it"
+REGEX_MISSING_DECL_LINE="A function DIE doesn't have decl_line. Maybe broken DWARF?"
+
+BLACKFUNC=""
+SKIP_DWARF=0
+
+for BLACKFUNC in $BLACKFUNC_LIST; do
+ echo "Probing $BLACKFUNC"
+
+ # functions from blacklist should be skipped by perf probe
+ ! $CMD_PERF probe $BLACKFUNC > $LOGS_DIR/adding_blacklisted.log 2> $LOGS_DIR/adding_blacklisted.err
+ PERF_EXIT_CODE=$?
+
+ # check for bad DWARF polluting the result
+ ../common/check_all_patterns_found.pl "$REGEX_MISSING_DECL_LINE" >/dev/null < $LOGS_DIR/adding_blacklisted.err
+
+ if [ $? -eq 0 ]; then
+ SKIP_DWARF=1
+ echo "Result polluted by broken DWARF, trying another probe"
+
+ # confirm that the broken DWARF comes from assembler
+ if [ -n "$VMLINUX_FILE" ]; then
+ readelf -wi "$VMLINUX_FILE" |
+ awk -v probe="$BLACKFUNC" '/DW_AT_language/ { comp_lang = $0 }
+ $0 ~ probe { if (comp_lang) { print comp_lang }; exit }' |
+ grep -q "MIPS assembler"
+
+ CHECK_EXIT_CODE=$?
+ if [ $CHECK_EXIT_CODE -ne 0 ]; then
+ SKIP_DWARF=0 # broken DWARF while available
+ break
+ fi
+ fi
+ else
+ ../common/check_all_lines_matched.pl "$REGEX_SKIP_MESSAGE" "$REGEX_NOT_FOUND_MESSAGE" "$REGEX_ERROR_MESSAGE" "$REGEX_SCOPE_FAIL" "$REGEX_INVALID_ARGUMENT" "$REGEX_SYMBOL_FAIL" "$REGEX_OUT_SECTION" < $LOGS_DIR/adding_blacklisted.err
+ CHECK_EXIT_CODE=$?
+
+ SKIP_DWARF=0
+ break
+ fi
+done
+
+if [ $SKIP_DWARF -eq 1 ]; then
+ print_testcase_skipped "adding blacklisted function $BLACKFUNC"
+else
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding blacklisted function $BLACKFUNC"
+ (( TEST_RESULT += $? ))
+fi
### listing not-added probe
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index b2f17cca014b..4a62ed593e84 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -288,6 +288,10 @@ int sys_enter_rename(struct syscall_enter_args *args)
augmented_args->arg.size = PERF_ALIGN(oldpath_len + 1, sizeof(u64));
len += augmented_args->arg.size;
+ /* Every read from userspace is limited to value size */
+ if (augmented_args->arg.size > sizeof(augmented_args->arg.value))
+ return 1; /* Failure: don't filter */
+
struct augmented_arg *arg2 = (void *)&augmented_args->arg.value + augmented_args->arg.size;
newpath_len = augmented_arg__read_str(arg2, newpath_arg, sizeof(augmented_args->arg.value));
@@ -315,6 +319,10 @@ int sys_enter_renameat2(struct syscall_enter_args *args)
augmented_args->arg.size = PERF_ALIGN(oldpath_len + 1, sizeof(u64));
len += augmented_args->arg.size;
+ /* Every read from userspace is limited to value size */
+ if (augmented_args->arg.size > sizeof(augmented_args->arg.value))
+ return 1; /* Failure: don't filter */
+
struct augmented_arg *arg2 = (void *)&augmented_args->arg.value + augmented_args->arg.size;
newpath_len = augmented_arg__read_str(arg2, newpath_arg, sizeof(augmented_args->arg.value));
@@ -423,8 +431,9 @@ static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
{
bool augmented, do_output = false;
- int zero = 0, size, aug_size, index, output = 0,
+ int zero = 0, size, aug_size, index,
value_size = sizeof(struct augmented_arg) - offsetof(struct augmented_arg, value);
+ u64 output = 0; /* has to be u64, otherwise it won't pass the verifier */
unsigned int nr, *beauty_map;
struct beauty_payload_enter *payload;
void *arg, *payload_offset;
@@ -477,6 +486,8 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
augmented = true;
} else if (size < 0 && size >= -6) { /* buffer */
index = -(size + 1);
+ barrier_var(index); // Prevent clang (noticed with v18) from removing the &= 7 trick.
+ index &= 7; // Satisfy the bounds checking with the verifier in some kernels.
aug_size = args->args[index];
if (aug_size > TRACE_AUG_MAX_BUF)
@@ -488,10 +499,17 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
}
}
+ /* Augmented data size is limited to sizeof(augmented_arg->unnamed union with value field) */
+ if (aug_size > value_size)
+ aug_size = value_size;
+
/* write data to payload */
if (augmented) {
int written = offsetof(struct augmented_arg, value) + aug_size;
+ if (written < 0 || written > sizeof(struct augmented_arg))
+ return 1;
+
((struct augmented_arg *)payload_offset)->size = aug_size;
output += written;
payload_offset += written;
@@ -499,7 +517,7 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
}
}
- if (!do_output)
+ if (!do_output || (sizeof(struct syscall_enter_args) + output) > sizeof(struct beauty_payload_enter))
return 1;
return augmented__beauty_output(ctx, payload, sizeof(struct syscall_enter_args) + output);
diff --git a/tools/perf/util/cap.c b/tools/perf/util/cap.c
index 7574a67651bc..69d9a2bcd40b 100644
--- a/tools/perf/util/cap.c
+++ b/tools/perf/util/cap.c
@@ -7,13 +7,9 @@
#include "debug.h"
#include <errno.h>
#include <string.h>
-#include <unistd.h>
#include <linux/capability.h>
#include <sys/syscall.h>
-
-#ifndef SYS_capget
-#define SYS_capget 90
-#endif
+#include <unistd.h>
#define MAX_LINUX_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
@@ -21,9 +17,9 @@ bool perf_cap__capable(int cap, bool *used_root)
{
struct __user_cap_header_struct header = {
.version = _LINUX_CAPABILITY_VERSION_3,
- .pid = getpid(),
+ .pid = 0,
};
- struct __user_cap_data_struct data[MAX_LINUX_CAPABILITY_U32S];
+ struct __user_cap_data_struct data[MAX_LINUX_CAPABILITY_U32S] = {};
__u32 cap_val;
*used_root = false;
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 31a223eaf8e6..ee3d43a7ba45 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -19,6 +19,7 @@
#include "util/bpf-filter.h"
#include "util/env.h"
#include "util/kvm-stat.h"
+#include "util/stat.h"
#include "util/kwork.h"
#include "util/sample.h"
#include "util/lock-contention.h"
@@ -1355,6 +1356,7 @@ error:
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
+#ifdef HAVE_KVM_STAT_SUPPORT
bool kvm_entry_event(struct evsel *evsel __maybe_unused)
{
return false;
@@ -1384,6 +1386,7 @@ void exit_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
char *decode __maybe_unused)
{
}
+#endif // HAVE_KVM_STAT_SUPPORT
int find_scripts(char **scripts_array __maybe_unused, char **scripts_path_array __maybe_unused,
int num __maybe_unused, int pathlen __maybe_unused)
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 349986f6e5f5..69d8dcf5cf28 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -50,6 +50,11 @@ static const char *const *syscalltbl_native = syscalltbl_loongarch;
#include <asm/syscalls.c>
const int syscalltbl_native_max_id = SYSCALLTBL_RISCV_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_riscv;
+#else
+const int syscalltbl_native_max_id = 0;
+static const char *const syscalltbl_native[] = {
+ [0] = "unknown",
+};
#endif
struct syscall {
@@ -186,6 +191,11 @@ int syscalltbl__id(struct syscalltbl *tbl, const char *name)
return audit_name_to_syscall(name, tbl->audit_machine);
}
+int syscalltbl__id_at_idx(struct syscalltbl *tbl __maybe_unused, int idx)
+{
+ return idx;
+}
+
int syscalltbl__strglobmatch_next(struct syscalltbl *tbl __maybe_unused,
const char *syscall_glob __maybe_unused, int *idx __maybe_unused)
{
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 27749c51c3ec..248ab790d143 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -320,7 +320,7 @@ u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
/*
* Access a cpumask in read-only mode (typically to check bits).
*/
-const struct cpumask *cast_mask(struct bpf_cpumask *mask)
+static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
{
return (const struct cpumask *)mask;
}
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index 90d5afd52dd0..050725afa45d 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -693,26 +693,22 @@ static int mock_decoder_commit(struct cxl_decoder *cxld)
return 0;
}
-static int mock_decoder_reset(struct cxl_decoder *cxld)
+static void mock_decoder_reset(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
int id = cxld->id;
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
- return 0;
+ return;
dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
- if (port->commit_end != id) {
+ if (port->commit_end == id)
+ cxl_port_commit_reap(cxld);
+ else
dev_dbg(&port->dev,
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
- return -EBUSY;
- }
-
- port->commit_end--;
cxld->flags &= ~CXL_DECODER_F_ENABLE;
-
- return 0;
}
static void default_mock_decoder(struct cxl_decoder *cxld)
@@ -1062,7 +1058,7 @@ static void mock_companion(struct acpi_device *adev, struct device *dev)
#define SZ_64G (SZ_32G * 2)
#endif
-static __init int cxl_rch_init(void)
+static __init int cxl_rch_topo_init(void)
{
int rc, i;
@@ -1090,30 +1086,8 @@ static __init int cxl_rch_init(void)
goto err_bridge;
}
- for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
- int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
- struct platform_device *rch = cxl_rch[i];
- struct platform_device *pdev;
-
- pdev = platform_device_alloc("cxl_rcd", idx);
- if (!pdev)
- goto err_mem;
- pdev->dev.parent = &rch->dev;
- set_dev_node(&pdev->dev, i % 2);
-
- rc = platform_device_add(pdev);
- if (rc) {
- platform_device_put(pdev);
- goto err_mem;
- }
- cxl_rcd[i] = pdev;
- }
-
return 0;
-err_mem:
- for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
- platform_device_unregister(cxl_rcd[i]);
err_bridge:
for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
struct platform_device *pdev = cxl_rch[i];
@@ -1127,12 +1101,10 @@ err_bridge:
return rc;
}
-static void cxl_rch_exit(void)
+static void cxl_rch_topo_exit(void)
{
int i;
- for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
- platform_device_unregister(cxl_rcd[i]);
for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
struct platform_device *pdev = cxl_rch[i];
@@ -1143,7 +1115,7 @@ static void cxl_rch_exit(void)
}
}
-static __init int cxl_single_init(void)
+static __init int cxl_single_topo_init(void)
{
int i, rc;
@@ -1228,29 +1200,8 @@ static __init int cxl_single_init(void)
cxl_swd_single[i] = pdev;
}
- for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
- struct platform_device *dport = cxl_swd_single[i];
- struct platform_device *pdev;
-
- pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
- if (!pdev)
- goto err_mem;
- pdev->dev.parent = &dport->dev;
- set_dev_node(&pdev->dev, i % 2);
-
- rc = platform_device_add(pdev);
- if (rc) {
- platform_device_put(pdev);
- goto err_mem;
- }
- cxl_mem_single[i] = pdev;
- }
-
return 0;
-err_mem:
- for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
- platform_device_unregister(cxl_mem_single[i]);
err_dport:
for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
platform_device_unregister(cxl_swd_single[i]);
@@ -1273,12 +1224,10 @@ err_bridge:
return rc;
}
-static void cxl_single_exit(void)
+static void cxl_single_topo_exit(void)
{
int i;
- for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
- platform_device_unregister(cxl_mem_single[i]);
for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
platform_device_unregister(cxl_swd_single[i]);
for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
@@ -1295,6 +1244,91 @@ static void cxl_single_exit(void)
}
}
+static void cxl_mem_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_rcd[i]);
+ for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_mem_single[i]);
+ for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_mem[i]);
+}
+
+static int cxl_mem_init(void)
+{
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
+ struct platform_device *dport = cxl_switch_dport[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_mem", i);
+ if (!pdev)
+ goto err_mem;
+ pdev->dev.parent = &dport->dev;
+ set_dev_node(&pdev->dev, i % 2);
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_mem;
+ }
+ cxl_mem[i] = pdev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
+ struct platform_device *dport = cxl_swd_single[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
+ if (!pdev)
+ goto err_single;
+ pdev->dev.parent = &dport->dev;
+ set_dev_node(&pdev->dev, i % 2);
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_single;
+ }
+ cxl_mem_single[i] = pdev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
+ int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
+ struct platform_device *rch = cxl_rch[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_rcd", idx);
+ if (!pdev)
+ goto err_rcd;
+ pdev->dev.parent = &rch->dev;
+ set_dev_node(&pdev->dev, i % 2);
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_rcd;
+ }
+ cxl_rcd[i] = pdev;
+ }
+
+ return 0;
+
+err_rcd:
+ for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_rcd[i]);
+err_single:
+ for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_mem_single[i]);
+err_mem:
+ for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_mem[i]);
+ return rc;
+}
+
static __init int cxl_test_init(void)
{
int rc, i;
@@ -1407,29 +1441,11 @@ static __init int cxl_test_init(void)
cxl_switch_dport[i] = pdev;
}
- for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
- struct platform_device *dport = cxl_switch_dport[i];
- struct platform_device *pdev;
-
- pdev = platform_device_alloc("cxl_mem", i);
- if (!pdev)
- goto err_mem;
- pdev->dev.parent = &dport->dev;
- set_dev_node(&pdev->dev, i % 2);
-
- rc = platform_device_add(pdev);
- if (rc) {
- platform_device_put(pdev);
- goto err_mem;
- }
- cxl_mem[i] = pdev;
- }
-
- rc = cxl_single_init();
+ rc = cxl_single_topo_init();
if (rc)
- goto err_mem;
+ goto err_dport;
- rc = cxl_rch_init();
+ rc = cxl_rch_topo_init();
if (rc)
goto err_single;
@@ -1442,19 +1458,20 @@ static __init int cxl_test_init(void)
rc = platform_device_add(cxl_acpi);
if (rc)
- goto err_add;
+ goto err_root;
+
+ rc = cxl_mem_init();
+ if (rc)
+ goto err_root;
return 0;
-err_add:
+err_root:
platform_device_put(cxl_acpi);
err_rch:
- cxl_rch_exit();
+ cxl_rch_topo_exit();
err_single:
- cxl_single_exit();
-err_mem:
- for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
- platform_device_unregister(cxl_mem[i]);
+ cxl_single_topo_exit();
err_dport:
for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
platform_device_unregister(cxl_switch_dport[i]);
@@ -1486,11 +1503,10 @@ static __exit void cxl_test_exit(void)
{
int i;
+ cxl_mem_exit();
platform_device_unregister(cxl_acpi);
- cxl_rch_exit();
- cxl_single_exit();
- for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
- platform_device_unregister(cxl_mem[i]);
+ cxl_rch_topo_exit();
+ cxl_single_topo_exit();
for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
platform_device_unregister(cxl_switch_dport[i]);
for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index ad5c4c18c5c6..71916e0e1546 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -1673,6 +1673,7 @@ static struct platform_driver cxl_mock_mem_driver = {
.name = KBUILD_MODNAME,
.dev_groups = cxl_mock_mem_groups,
.groups = cxl_mock_mem_core_groups,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index 1873ddbe16cc..551ae6898c1d 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -36317,6 +36317,28 @@ static inline int check_vma_modification(struct maple_tree *mt)
return 0;
}
+/*
+ * test to check that bulk stores do not use wr_rebalance as the store
+ * type.
+ */
+static inline void check_bulk_rebalance(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);
+ int max = 10;
+
+ build_full_tree(mt, 0, 2);
+
+ /* erase every entry in the tree */
+ do {
+ /* set up bulk store mode */
+ mas_expected_entries(&mas, max);
+ mas_erase(&mas);
+ MT_BUG_ON(mt, mas.store_type == wr_rebalance);
+ } while (mas_prev(&mas, 0) != NULL);
+
+ mas_destroy(&mas);
+}
+
void farmer_tests(void)
{
struct maple_node *node;
@@ -36328,6 +36350,10 @@ void farmer_tests(void)
check_vma_modification(&tree);
mtree_destroy(&tree);
+ mt_init(&tree);
+ check_bulk_rebalance(&tree);
+ mtree_destroy(&tree);
+
tree.ma_root = xa_mk_value(0);
mt_dump(&tree, mt_dump_dec);
@@ -36406,9 +36432,93 @@ void farmer_tests(void)
check_nomem(&tree);
}
+static unsigned long get_last_index(struct ma_state *mas)
+{
+ struct maple_node *node = mas_mn(mas);
+ enum maple_type mt = mte_node_type(mas->node);
+ unsigned long *pivots = ma_pivots(node, mt);
+ unsigned long last_index = mas_data_end(mas);
+
+ BUG_ON(last_index == 0);
+
+ return pivots[last_index - 1] + 1;
+}
+
+/*
+ * Assert that we handle spanning stores that consume the entirety of the right
+ * leaf node correctly.
+ */
+static void test_spanning_store_regression(void)
+{
+ unsigned long from = 0, to = 0;
+ DEFINE_MTREE(tree);
+ MA_STATE(mas, &tree, 0, 0);
+
+ /*
+ * Build a 3-level tree. We require a parent node below the root node
+ * and 2 leaf nodes under it, so we can span the entirety of the right
+ * hand node.
+ */
+ build_full_tree(&tree, 0, 3);
+
+ /* Descend into position at depth 2. */
+ mas_reset(&mas);
+ mas_start(&mas);
+ mas_descend(&mas);
+ mas_descend(&mas);
+
+ /*
+ * We need to establish a tree like the below.
+ *
+ * Then we can try a store in [from, to] which results in a spanned
+ * store across nodes B and C, with the maple state at the time of the
+ * write being such that only the subtree at A and below is considered.
+ *
+ * Height
+ * 0 Root Node
+ * / \
+ * pivot = to / \ pivot = ULONG_MAX
+ * / \
+ * 1 A [-----] ...
+ * / \
+ * pivot = from / \ pivot = to
+ * / \
+ * 2 (LEAVES) B [-----] [-----] C
+ * ^--- Last pivot to.
+ */
+ while (true) {
+ unsigned long tmp = get_last_index(&mas);
+
+ if (mas_next_sibling(&mas)) {
+ from = tmp;
+ to = mas.max;
+ } else {
+ break;
+ }
+ }
+
+ BUG_ON(from == 0 && to == 0);
+
+ /* Perform the store. */
+ mas_set_range(&mas, from, to);
+ mas_store_gfp(&mas, xa_mk_value(0xdead), GFP_KERNEL);
+
+ /* If the regression occurs, the validation will fail. */
+ mt_validate(&tree);
+
+ /* Cleanup. */
+ __mt_destroy(&tree);
+}
+
+static void regression_tests(void)
+{
+ test_spanning_store_regression();
+}
+
void maple_tree_tests(void)
{
#if !defined(BENCH)
+ regression_tests();
farmer_tests();
#endif
maple_tree_seed();
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index b38199965f99..363d031a16f7 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -88,6 +88,7 @@ TARGETS += rlimits
TARGETS += rseq
TARGETS += rtc
TARGETS += rust
+TARGETS += sched_ext
TARGETS += seccomp
TARGETS += sgx
TARGETS += sigaltstack
@@ -129,10 +130,10 @@ ifeq ($(filter net/lib,$(TARGETS)),)
endif
endif
-# User can optionally provide a TARGETS skiplist. By default we skip
-# BPF since it has cutting edge build time dependencies which require
-# more effort to install.
-SKIP_TARGETS ?= bpf
+# User can optionally provide a TARGETS skiplist. By default we skip
+# targets using BPF since it has cutting edge build time dependencies
+# which require more effort to install.
+SKIP_TARGETS ?= bpf sched_ext
ifneq ($(SKIP_TARGETS),)
TMP := $(filter-out $(SKIP_TARGETS), $(TARGETS))
override TARGETS := $(TMP)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index f04af11df8eb..75016962f795 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -157,7 +157,8 @@ TEST_GEN_PROGS_EXTENDED = \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
- xdp_features bpf_test_no_cfi.ko
+ xdp_features bpf_test_no_cfi.ko bpf_test_modorder_x.ko \
+ bpf_test_modorder_y.ko
TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi
@@ -263,7 +264,7 @@ $(OUTPUT)/%:%.c
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 riscv))
LLD := lld
else
-LLD := ld
+LLD := $(shell command -v $(LD))
endif
# Filter out -static for liburandom_read.so and its dependent targets so that static builds
@@ -303,6 +304,19 @@ $(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_te
$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_no_cfi
$(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@
+$(OUTPUT)/bpf_test_modorder_x.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_x/Makefile bpf_test_modorder_x/*.[ch])
+ $(call msg,MOD,,$@)
+ $(Q)$(RM) bpf_test_modorder_x/bpf_test_modorder_x.ko # force re-compilation
+ $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_modorder_x
+ $(Q)cp bpf_test_modorder_x/bpf_test_modorder_x.ko $@
+
+$(OUTPUT)/bpf_test_modorder_y.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_y/Makefile bpf_test_modorder_y/*.[ch])
+ $(call msg,MOD,,$@)
+ $(Q)$(RM) bpf_test_modorder_y/bpf_test_modorder_y.ko # force re-compilation
+ $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_modorder_y
+ $(Q)cp bpf_test_modorder_y/bpf_test_modorder_y.ko $@
+
+
DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
ifneq ($(CROSS_COMPILE),)
CROSS_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
@@ -722,6 +736,8 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
ip_check_defrag_frags.h
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
$(OUTPUT)/bpf_test_no_cfi.ko \
+ $(OUTPUT)/bpf_test_modorder_x.ko \
+ $(OUTPUT)/bpf_test_modorder_y.ko \
$(OUTPUT)/liburandom_read.so \
$(OUTPUT)/xdp_synproxy \
$(OUTPUT)/sign-file \
@@ -856,6 +872,8 @@ EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
$(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \
no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \
bpf_test_no_cfi.ko \
+ bpf_test_modorder_x.ko \
+ bpf_test_modorder_y.ko \
liburandom_read.so) \
$(OUTPUT)/FEATURE-DUMP.selftests
diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile b/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile
new file mode 100644
index 000000000000..40b25b98ad1b
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile
@@ -0,0 +1,19 @@
+BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
+KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)
+
+ifeq ($(V),1)
+Q =
+else
+Q = @
+endif
+
+MODULES = bpf_test_modorder_x.ko
+
+obj-m += bpf_test_modorder_x.o
+
+all:
+ +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules
+
+clean:
+ +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean
+
diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c b/tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c
new file mode 100644
index 000000000000..0cc747fa912f
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc int bpf_test_modorder_retx(void)
+{
+ return 'x';
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(bpf_test_modorder_kfunc_x_ids)
+BTF_ID_FLAGS(func, bpf_test_modorder_retx);
+BTF_KFUNCS_END(bpf_test_modorder_kfunc_x_ids)
+
+static const struct btf_kfunc_id_set bpf_test_modorder_x_set = {
+ .owner = THIS_MODULE,
+ .set = &bpf_test_modorder_kfunc_x_ids,
+};
+
+static int __init bpf_test_modorder_x_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
+ &bpf_test_modorder_x_set);
+}
+
+static void __exit bpf_test_modorder_x_exit(void)
+{
+}
+
+module_init(bpf_test_modorder_x_init);
+module_exit(bpf_test_modorder_x_exit);
+
+MODULE_DESCRIPTION("BPF selftest ordertest module X");
+MODULE_LICENSE("GPL");
diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile b/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile
new file mode 100644
index 000000000000..52c3ab9d84e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile
@@ -0,0 +1,19 @@
+BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
+KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)
+
+ifeq ($(V),1)
+Q =
+else
+Q = @
+endif
+
+MODULES = bpf_test_modorder_y.ko
+
+obj-m += bpf_test_modorder_y.o
+
+all:
+ +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules
+
+clean:
+ +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean
+
diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c b/tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c
new file mode 100644
index 000000000000..c627ee085d13
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc int bpf_test_modorder_rety(void)
+{
+ return 'y';
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(bpf_test_modorder_kfunc_y_ids)
+BTF_ID_FLAGS(func, bpf_test_modorder_rety);
+BTF_KFUNCS_END(bpf_test_modorder_kfunc_y_ids)
+
+static const struct btf_kfunc_id_set bpf_test_modorder_y_set = {
+ .owner = THIS_MODULE,
+ .set = &bpf_test_modorder_kfunc_y_ids,
+};
+
+static int __init bpf_test_modorder_y_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
+ &bpf_test_modorder_y_set);
+}
+
+static void __exit bpf_test_modorder_y_exit(void)
+{
+}
+
+module_init(bpf_test_modorder_y_init);
+module_exit(bpf_test_modorder_y_exit);
+
+MODULE_DESCRIPTION("BPF selftest ordertest module Y");
+MODULE_LICENSE("GPL");
diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c
new file mode 100644
index 000000000000..0ba015686492
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <linux/bpf.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <test_maps.h>
+
+struct test_lpm_key {
+ __u32 prefix;
+ __u32 data;
+};
+
+struct get_next_key_ctx {
+ struct test_lpm_key key;
+ bool start;
+ bool stop;
+ int map_fd;
+ int loop;
+};
+
+static void *get_next_key_fn(void *arg)
+{
+ struct get_next_key_ctx *ctx = arg;
+ struct test_lpm_key next_key;
+ int i = 0;
+
+ while (!ctx->start)
+ usleep(1);
+
+ while (!ctx->stop && i++ < ctx->loop)
+ bpf_map_get_next_key(ctx->map_fd, &ctx->key, &next_key);
+
+ return NULL;
+}
+
+static void abort_get_next_key(struct get_next_key_ctx *ctx, pthread_t *tids,
+ unsigned int nr)
+{
+ unsigned int i;
+
+ ctx->stop = true;
+ ctx->start = true;
+ for (i = 0; i < nr; i++)
+ pthread_join(tids[i], NULL);
+}
+
+/* This test aims to prevent future regressions. As long as the kernel does
+ * not panic, it is considered a success.
+ */
+void test_lpm_trie_map_get_next_key(void)
+{
+#define MAX_NR_THREADS 8
+ LIBBPF_OPTS(bpf_map_create_opts, create_opts,
+ .map_flags = BPF_F_NO_PREALLOC);
+ struct test_lpm_key key = {};
+ __u32 val = 0;
+ int map_fd;
+ const __u32 max_prefixlen = 8 * (sizeof(key) - sizeof(key.prefix));
+ const __u32 max_entries = max_prefixlen + 1;
+ unsigned int i, nr = MAX_NR_THREADS, loop = 65536;
+ pthread_t tids[MAX_NR_THREADS];
+ struct get_next_key_ctx ctx;
+ int err;
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie_map",
+ sizeof(struct test_lpm_key), sizeof(__u32),
+ max_entries, &create_opts);
+ CHECK(map_fd == -1, "bpf_map_create()", "error:%s\n",
+ strerror(errno));
+
+ for (i = 0; i <= max_prefixlen; i++) {
+ key.prefix = i;
+ err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
+ CHECK(err, "bpf_map_update_elem()", "error:%s\n",
+ strerror(errno));
+ }
+
+ ctx.start = false;
+ ctx.stop = false;
+ ctx.map_fd = map_fd;
+ ctx.loop = loop;
+ memcpy(&ctx.key, &key, sizeof(key));
+
+ for (i = 0; i < nr; i++) {
+ err = pthread_create(&tids[i], NULL, get_next_key_fn, &ctx);
+ if (err) {
+ abort_get_next_key(&ctx, tids, i);
+ CHECK(err, "pthread_create", "error %d\n", err);
+ }
+ }
+
+ ctx.start = true;
+ for (i = 0; i < nr; i++)
+ pthread_join(tids[i], NULL);
+
+ printf("%s:PASS\n", __func__);
+
+ close(map_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 52e6f7570475..f0a3a9c18e9e 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -226,7 +226,7 @@ static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
"pthread_create");
- skel->bss->tid = getpid();
+ skel->bss->tid = gettid();
do_dummy_read_opts(skel->progs.dump_task, opts);
@@ -249,25 +249,42 @@ static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown,
ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}
-static void test_task_tid(void)
+static void *run_test_task_tid(void *arg)
{
LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
int num_unknown_tid, num_known_tid;
+ ASSERT_NEQ(getpid(), gettid(), "check_new_thread_id");
+
memset(&linfo, 0, sizeof(linfo));
- linfo.task.tid = getpid();
+ linfo.task.tid = gettid();
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
test_task_common(&opts, 0, 1);
linfo.task.tid = 0;
linfo.task.pid = getpid();
- test_task_common(&opts, 1, 1);
+ /* This includes the parent thread, this thread,
+ * and the do_nothing_wait thread
+ */
+ test_task_common(&opts, 2, 1);
test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
- ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
+ ASSERT_GT(num_unknown_tid, 2, "check_num_unknown_tid");
ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
+
+ return NULL;
+}
+
+static void test_task_tid(void)
+{
+ pthread_t thread_id;
+
+ /* Create a new thread so pid and tid aren't the same */
+ ASSERT_OK(pthread_create(&thread_id, NULL, &run_test_task_tid, NULL),
+ "pthread_create");
+ ASSERT_FALSE(pthread_join(thread_id, NULL), "pthread_join");
}
static void test_task_pid(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c b/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c
index 9250a1e9f9af..3f9ffdf71343 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c
@@ -35,7 +35,7 @@ static int send_datagram(void)
if (!ASSERT_OK_FD(sock, "create socket"))
return sock;
- if (!ASSERT_OK(connect(sock, &addr, sizeof(addr)), "connect")) {
+ if (!ASSERT_OK(connect(sock, (struct sockaddr *)&addr, sizeof(addr)), "connect")) {
close(sock);
return -1;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
index 2570bd4b0cb2..e58a04654238 100644
--- a/tools/testing/selftests/bpf/prog_tests/cpumask.c
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
@@ -23,6 +23,7 @@ static const char * const cpumask_success_testcases[] = {
"test_global_mask_array_l2_rcu",
"test_global_mask_nested_rcu",
"test_global_mask_nested_deep_rcu",
+ "test_global_mask_nested_deep_array_rcu",
"test_cpumask_weight",
};
diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
index f3932941bbaa..d50cbd8040d4 100644
--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
@@ -67,8 +67,9 @@ again:
ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie");
+ ASSERT_EQ(info.perf_event.kprobe.name_len, strlen(KPROBE_FUNC) + 1,
+ "name_len");
if (!info.perf_event.kprobe.func_name) {
- ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
info.perf_event.kprobe.name_len = sizeof(buf);
goto again;
@@ -79,8 +80,9 @@ again:
ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
break;
case BPF_PERF_EVENT_TRACEPOINT:
+ ASSERT_EQ(info.perf_event.tracepoint.name_len, strlen(TP_NAME) + 1,
+ "name_len");
if (!info.perf_event.tracepoint.tp_name) {
- ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
info.perf_event.tracepoint.name_len = sizeof(buf);
goto again;
@@ -96,8 +98,9 @@ again:
case BPF_PERF_EVENT_URETPROBE:
ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");
+ ASSERT_EQ(info.perf_event.uprobe.name_len, strlen(UPROBE_FILE) + 1,
+ "name_len");
if (!info.perf_event.uprobe.file_name) {
- ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
info.perf_event.uprobe.name_len = sizeof(buf);
goto again;
@@ -417,6 +420,15 @@ verify_umulti_link_info(int fd, bool retprobe, __u64 *offsets,
if (!ASSERT_NEQ(err, -1, "readlink"))
return -1;
+ memset(&info, 0, sizeof(info));
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
+ return -1;
+
+ ASSERT_EQ(info.uprobe_multi.count, 3, "info.uprobe_multi.count");
+ ASSERT_EQ(info.uprobe_multi.path_size, strlen(path) + 1,
+ "info.uprobe_multi.path_size");
+
for (bit = 0; bit < 8; bit++) {
memset(&info, 0, sizeof(info));
info.uprobe_multi.path = ptr_to_u64(path_buf);
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_module_order.c b/tools/testing/selftests/bpf/prog_tests/kfunc_module_order.c
new file mode 100644
index 000000000000..48c0560d398e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_module_order.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <testing_helpers.h>
+
+#include "kfunc_module_order.skel.h"
+
+static int test_run_prog(const struct bpf_program *prog,
+ struct bpf_test_run_opts *opts)
+{
+ int err;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog), opts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ return err;
+
+ if (!ASSERT_EQ((int)opts->retval, 0, bpf_program__name(prog)))
+ return -EINVAL;
+
+ return 0;
+}
+
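+/* Call kfuncs exported by two different modules, in both orders, and check
+ * that each call resolves to the kfunc from the right module.
+ */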
+void test_kfunc_module_order(void)
+{
+ struct kfunc_module_order *skel;
+ char pkt_data[64] = {};
+ int err = 0;
+
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, test_opts, .data_in = pkt_data,
+ .data_size_in = sizeof(pkt_data));
+
+ err = load_module("bpf_test_modorder_x.ko",
+ env_verbosity > VERBOSE_NONE);
+ if (!ASSERT_OK(err, "load bpf_test_modorder_x.ko"))
+ return;
+
+ err = load_module("bpf_test_modorder_y.ko",
+ env_verbosity > VERBOSE_NONE);
+ if (!ASSERT_OK(err, "load bpf_test_modorder_y.ko"))
+ goto exit_modx;
+
+ skel = kfunc_module_order__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kfunc_module_order__open_and_load()")) {
+ err = -EINVAL;
+ goto exit_mods;
+ }
+
+ test_run_prog(skel->progs.call_kfunc_xy, &test_opts);
+ test_run_prog(skel->progs.call_kfunc_yx, &test_opts);
+
+ kfunc_module_order__destroy(skel);
+exit_mods:
+ unload_module("bpf_test_modorder_y", env_verbosity > VERBOSE_NONE);
+exit_modx:
+ unload_module("bpf_test_modorder_x", env_verbosity > VERBOSE_NONE);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c b/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c
index 4297a2a4cb11..2f52fa2641ba 100644
--- a/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c
@@ -26,10 +26,43 @@ static const struct nf_link_test nf_hook_link_tests[] = {
{ .pf = NFPROTO_INET, .priority = 1, .name = "invalid-inet-not-supported", },
- { .pf = NFPROTO_IPV4, .priority = -10000, .expect_success = true, .name = "attach ipv4", },
- { .pf = NFPROTO_IPV6, .priority = 10001, .expect_success = true, .name = "attach ipv6", },
+ {
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = -10000,
+ .flags = 0,
+ .expect_success = true,
+ .name = "attach ipv4",
+ },
+ {
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_FORWARD,
+ .priority = 10001,
+ .flags = BPF_F_NETFILTER_IP_DEFRAG,
+ .expect_success = true,
+ .name = "attach ipv6",
+ },
};
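+/* Check that bpf_link_get_info_by_fd() reports back the protocol family,
+ * hooknum, priority and flags the netfilter link was attached with.
+ */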
+static void verify_netfilter_link_info(struct bpf_link *link, const struct nf_link_test nf_expected)
+{
+ struct bpf_link_info info;
+ __u32 len = sizeof(info);
+ int err, fd;
+
+ memset(&info, 0, len);
+
+ fd = bpf_link__fd(link);
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_OK(err, "get_link_info");
+
+ ASSERT_EQ(info.type, BPF_LINK_TYPE_NETFILTER, "info link type");
+ ASSERT_EQ(info.netfilter.pf, nf_expected.pf, "info nf protocol family");
+ ASSERT_EQ(info.netfilter.hooknum, nf_expected.hooknum, "info nf hooknum");
+ ASSERT_EQ(info.netfilter.priority, nf_expected.priority, "info nf priority");
+ ASSERT_EQ(info.netfilter.flags, nf_expected.flags, "info nf flags");
+}
+
void test_netfilter_link_attach(void)
{
struct test_netfilter_link_attach *skel;
@@ -64,6 +97,8 @@ void test_netfilter_link_attach(void)
if (!ASSERT_OK_PTR(link, "program attach successful"))
continue;
+ verify_netfilter_link_info(link, nf_hook_link_tests[i]);
+
link2 = bpf_program__attach_netfilter(prog, &opts);
ASSERT_ERR_PTR(link2, "attach program with same pf/hook/priority");
@@ -73,6 +108,9 @@ void test_netfilter_link_attach(void)
link2 = bpf_program__attach_netfilter(prog, &opts);
if (!ASSERT_OK_PTR(link2, "program reattach successful"))
continue;
+
+ verify_netfilter_link_info(link2, nf_hook_link_tests[i]);
+
if (!ASSERT_OK(bpf_link__destroy(link2), "link destroy"))
break;
} else {
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index e26b5150fc43..75f7a2ce334b 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -44,6 +44,7 @@
#include "verifier_ld_ind.skel.h"
#include "verifier_ldsx.skel.h"
#include "verifier_leak_ptr.skel.h"
+#include "verifier_linked_scalars.skel.h"
#include "verifier_loops1.skel.h"
#include "verifier_lwt.skel.h"
#include "verifier_map_in_map.skel.h"
@@ -53,6 +54,7 @@
#include "verifier_masking.skel.h"
#include "verifier_meta_access.skel.h"
#include "verifier_movsx.skel.h"
+#include "verifier_mtu.skel.h"
#include "verifier_netfilter_ctx.skel.h"
#include "verifier_netfilter_retcode.skel.h"
#include "verifier_bpf_fastcall.skel.h"
@@ -170,6 +172,7 @@ void test_verifier_jit_convergence(void) { RUN(verifier_jit_convergence); }
void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
+void test_verifier_linked_scalars(void) { RUN(verifier_linked_scalars); }
void test_verifier_loops1(void) { RUN(verifier_loops1); }
void test_verifier_lwt(void) { RUN(verifier_lwt); }
void test_verifier_map_in_map(void) { RUN(verifier_map_in_map); }
@@ -221,6 +224,24 @@ void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_pack
void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
void test_verifier_lsm(void) { RUN(verifier_lsm); }
+void test_verifier_mtu(void)
+{
+ __u64 caps = 0;
+ int ret;
+
+ /* In case CAP_BPF and CAP_PERFMON are not set */
+ ret = cap_enable_effective(1ULL << CAP_BPF | 1ULL << CAP_NET_ADMIN, &caps);
+ if (!ASSERT_OK(ret, "set_cap_bpf_cap_net_admin"))
+ return;
+ ret = cap_disable_effective(1ULL << CAP_SYS_ADMIN | 1ULL << CAP_PERFMON, NULL);
+ if (!ASSERT_OK(ret, "disable_cap_sys_admin"))
+ goto restore_cap;
+ RUN(verifier_mtu);
+restore_cap:
+ if (caps)
+ cap_enable_effective(caps, NULL);
+}
+
static int init_test_val_map(struct bpf_object *obj, char *map_name)
{
struct test_val value = {
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
index ce6812558287..27ffed17d4be 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+#include <arpa/inet.h>
#include <uapi/linux/bpf.h>
#include <linux/if_link.h>
+#include <network_helpers.h>
+#include <net/if.h>
#include <test_progs.h>
#include "test_xdp_devmap_helpers.skel.h"
@@ -8,31 +11,36 @@
#include "test_xdp_with_devmap_helpers.skel.h"
#define IFINDEX_LO 1
+#define TEST_NS "devmap_attach_ns"
static void test_xdp_with_devmap_helpers(void)
{
- struct test_xdp_with_devmap_helpers *skel;
+ struct test_xdp_with_devmap_helpers *skel = NULL;
struct bpf_prog_info info = {};
struct bpf_devmap_val val = {
.ifindex = IFINDEX_LO,
};
__u32 len = sizeof(info);
- int err, dm_fd, map_fd;
+ int err, dm_fd, dm_fd_redir, map_fd;
+ struct nstoken *nstoken = NULL;
+ char data[10] = {};
__u32 idx = 0;
+ SYS(out_close, "ip netns add %s", TEST_NS);
+ nstoken = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto out_close;
+ SYS(out_close, "ip link set dev lo up");
skel = test_xdp_with_devmap_helpers__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))
- return;
+ goto out_close;
- dm_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
- err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);
+ dm_fd_redir = bpf_program__fd(skel->progs.xdp_redir_prog);
+ err = bpf_xdp_attach(IFINDEX_LO, dm_fd_redir, XDP_FLAGS_SKB_MODE, NULL);
if (!ASSERT_OK(err, "Generic attach of program with 8-byte devmap"))
goto out_close;
- err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
- ASSERT_OK(err, "XDP program detach");
-
dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
map_fd = bpf_map__fd(skel->maps.dm_ports);
err = bpf_prog_get_info_by_fd(dm_fd, &info, &len);
@@ -47,6 +55,22 @@ static void test_xdp_with_devmap_helpers(void)
ASSERT_OK(err, "Read devmap entry");
ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id");
+ /* send a packet to trigger any potential bugs in there */
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .data_size_in = 10,
+ .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ .repeat = 1,
+ );
+ err = bpf_prog_test_run_opts(dm_fd_redir, &opts);
+ ASSERT_OK(err, "XDP test run");
+
+ /* wait for the packets to be flushed */
+ kern_sync_rcu();
+
+ err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
+ ASSERT_OK(err, "XDP program detach");
+
/* can not attach BPF_XDP_DEVMAP program to a device */
err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);
if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_DEVMAP program"))
@@ -67,6 +91,8 @@ static void test_xdp_with_devmap_helpers(void)
ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to devmap entry");
out_close:
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
test_xdp_with_devmap_helpers__destroy(skel);
}
@@ -124,6 +150,86 @@ out_close:
test_xdp_with_devmap_frags_helpers__destroy(skel);
}
+static void test_xdp_with_devmap_helpers_veth(void)
+{
+ struct test_xdp_with_devmap_helpers *skel = NULL;
+ struct bpf_prog_info info = {};
+ struct bpf_devmap_val val = {};
+ struct nstoken *nstoken = NULL;
+ __u32 len = sizeof(info);
+ int err, dm_fd, dm_fd_redir, map_fd, ifindex_dst;
+ char data[10] = {};
+ __u32 idx = 0;
+
+ SYS(out_close, "ip netns add %s", TEST_NS);
+ nstoken = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto out_close;
+
+ SYS(out_close, "ip link add veth_src type veth peer name veth_dst");
+ SYS(out_close, "ip link set dev veth_src up");
+ SYS(out_close, "ip link set dev veth_dst up");
+
+ val.ifindex = if_nametoindex("veth_src");
+ ifindex_dst = if_nametoindex("veth_dst");
+ if (!ASSERT_NEQ(val.ifindex, 0, "val.ifindex") ||
+ !ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
+ goto out_close;
+
+ skel = test_xdp_with_devmap_helpers__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))
+ goto out_close;
+
+ dm_fd_redir = bpf_program__fd(skel->progs.xdp_redir_prog);
+ err = bpf_xdp_attach(val.ifindex, dm_fd_redir, XDP_FLAGS_DRV_MODE, NULL);
+ if (!ASSERT_OK(err, "Attach of program with 8-byte devmap"))
+ goto out_close;
+
+ dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
+ map_fd = bpf_map__fd(skel->maps.dm_ports);
+ err = bpf_prog_get_info_by_fd(dm_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ goto out_close;
+
+ val.bpf_prog.fd = dm_fd;
+ err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+ ASSERT_OK(err, "Add program to devmap entry");
+
+ err = bpf_map_lookup_elem(map_fd, &idx, &val);
+ ASSERT_OK(err, "Read devmap entry");
+ ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id");
+
+ /* attach dummy to other side to enable reception */
+ dm_fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
+ err = bpf_xdp_attach(ifindex_dst, dm_fd, XDP_FLAGS_DRV_MODE, NULL);
+ if (!ASSERT_OK(err, "Attach of dummy XDP"))
+ goto out_close;
+
+ /* send a packet to trigger any potential bugs in there */
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .data_size_in = 10,
+ .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ .repeat = 1,
+ );
+ err = bpf_prog_test_run_opts(dm_fd_redir, &opts);
+ ASSERT_OK(err, "XDP test run");
+
+ /* wait for the packets to be flushed */
+ kern_sync_rcu();
+
+ err = bpf_xdp_detach(val.ifindex, XDP_FLAGS_DRV_MODE, NULL);
+ ASSERT_OK(err, "XDP program detach");
+
+ err = bpf_xdp_detach(ifindex_dst, XDP_FLAGS_DRV_MODE, NULL);
+ ASSERT_OK(err, "XDP program detach");
+
+out_close:
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
+ test_xdp_with_devmap_helpers__destroy(skel);
+}
+
void serial_test_xdp_devmap_attach(void)
{
if (test__start_subtest("DEVMAP with programs in entries"))
@@ -134,4 +240,7 @@ void serial_test_xdp_devmap_attach(void)
if (test__start_subtest("Verifier check of DEVMAP programs"))
test_neg_xdp_devmap_helpers();
+
+ if (test__start_subtest("DEVMAP with programs in entries on veth"))
+ test_xdp_with_devmap_helpers_veth();
}
diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h
index b979e91f55f0..4ece7873ba60 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_common.h
+++ b/tools/testing/selftests/bpf/progs/cpumask_common.h
@@ -7,6 +7,11 @@
#include "errno.h"
#include <stdbool.h>
+/* Should use BTF_FIELDS_MAX, but it is not always available in vmlinux.h,
+ * so use the hard-coded number as a workaround.
+ */
+#define CPUMASK_KPTR_FIELDS_MAX 11
+
int err;
#define private(name) SEC(".bss." #name) __attribute__((aligned(8)))
diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c
index a988d2823b52..b40b52548ffb 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_failure.c
+++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c
@@ -10,6 +10,21 @@
char _license[] SEC("license") = "GPL";
+struct kptr_nested_array_2 {
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_array_1 {
+ /* Make btf_parse_fields() in map_create() return -E2BIG */
+ struct kptr_nested_array_2 d_2[CPUMASK_KPTR_FIELDS_MAX + 1];
+};
+
+struct kptr_nested_array {
+ struct kptr_nested_array_1 d_1;
+};
+
+private(MASK_NESTED) static struct kptr_nested_array global_mask_nested_arr;
+
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(task_newtask,
@@ -187,3 +202,23 @@ int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 c
return 0;
}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("has no valid kptr")
+int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask_nested_arr.d_1.d_2[CPUMASK_KPTR_FIELDS_MAX].mask, local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
index fd8106831c32..80ee469b0b60 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_success.c
+++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
@@ -31,11 +31,59 @@ struct kptr_nested_deep {
struct kptr_nested_pair ptr_pairs[3];
};
+struct kptr_nested_deep_array_1_2 {
+ int dummy;
+ struct bpf_cpumask __kptr * mask[CPUMASK_KPTR_FIELDS_MAX];
+};
+
+struct kptr_nested_deep_array_1_1 {
+ int dummy;
+ struct kptr_nested_deep_array_1_2 d_2;
+};
+
+struct kptr_nested_deep_array_1 {
+ long dummy;
+ struct kptr_nested_deep_array_1_1 d_1;
+};
+
+struct kptr_nested_deep_array_2_2 {
+ long dummy[2];
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_deep_array_2_1 {
+ int dummy;
+ struct kptr_nested_deep_array_2_2 d_2[CPUMASK_KPTR_FIELDS_MAX];
+};
+
+struct kptr_nested_deep_array_2 {
+ long dummy;
+ struct kptr_nested_deep_array_2_1 d_1;
+};
+
+struct kptr_nested_deep_array_3_2 {
+ long dummy[2];
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_deep_array_3_1 {
+ int dummy;
+ struct kptr_nested_deep_array_3_2 d_2;
+};
+
+struct kptr_nested_deep_array_3 {
+ long dummy;
+ struct kptr_nested_deep_array_3_1 d_1[CPUMASK_KPTR_FIELDS_MAX];
+};
+
private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
private(MASK) static struct kptr_nested global_mask_nested[2];
private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;
+private(MASK_1) static struct kptr_nested_deep_array_1 global_mask_nested_deep_array_1;
+private(MASK_2) static struct kptr_nested_deep_array_2 global_mask_nested_deep_array_2;
+private(MASK_3) static struct kptr_nested_deep_array_3 global_mask_nested_deep_array_3;
static bool is_test_task(void)
{
@@ -543,12 +591,21 @@ static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
goto err_exit;
}
- /* [<mask 0>, NULL] */
- if (!*mask0 || *mask1) {
+ /* [<mask 0>, *] */
+ if (!*mask0) {
err = 2;
goto err_exit;
}
+ if (!mask1)
+ goto err_exit;
+
+ /* [*, NULL] */
+ if (*mask1) {
+ err = 3;
+ goto err_exit;
+ }
+
local = create_cpumask();
if (!local) {
err = 9;
@@ -632,6 +689,23 @@ int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clo
}
SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u64 clone_flags)
+{
+ int i;
+
+ for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+ _global_mask_array_rcu(&global_mask_nested_deep_array_1.d_1.d_2.mask[i], NULL);
+
+ for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+ _global_mask_array_rcu(&global_mask_nested_deep_array_2.d_1.d_2[i].mask, NULL);
+
+ for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+ _global_mask_array_rcu(&global_mask_nested_deep_array_3.d_1[i].d_2.mask, NULL);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *local;
diff --git a/tools/testing/selftests/bpf/progs/kfunc_module_order.c b/tools/testing/selftests/bpf/progs/kfunc_module_order.c
new file mode 100644
index 000000000000..76003d04c95f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_module_order.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+extern int bpf_test_modorder_retx(void) __ksym;
+extern int bpf_test_modorder_rety(void) __ksym;
+
+SEC("classifier")
+int call_kfunc_xy(struct __sk_buff *skb)
+{
+ int ret1, ret2;
+
+ ret1 = bpf_test_modorder_retx();
+ ret2 = bpf_test_modorder_rety();
+
+ return ret1 == 'x' && ret2 == 'y' ? 0 : -1;
+}
+
+SEC("classifier")
+int call_kfunc_yx(struct __sk_buff *skb)
+{
+ int ret1, ret2;
+
+ ret1 = bpf_test_modorder_rety();
+ ret2 = bpf_test_modorder_retx();
+
+ return ret1 == 'y' && ret2 == 'x' ? 0 : -1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
index 4139a14f9996..92b65a485d4a 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
@@ -12,7 +12,7 @@ struct {
SEC("xdp")
int xdp_redir_prog(struct xdp_md *ctx)
{
- return bpf_redirect_map(&dm_ports, 1, 0);
+ return bpf_redirect_map(&dm_ports, 0, 0);
}
/* invalid program on DEVMAP entry;
diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
index f4da4d508ddb..156cc278e2fc 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
@@ -15,6 +15,8 @@ int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign,
int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak;
void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak;
+u64 bits_array[511] = {};
+
SEC("iter.s/cgroup")
__description("bits iter without destroy")
__failure __msg("Unreleased reference")
@@ -110,16 +112,16 @@ int bit_index(void)
}
SEC("syscall")
-__description("bits nomem")
+__description("bits too big")
__success __retval(0)
-int bits_nomem(void)
+int bits_too_big(void)
{
u64 data[4];
int nr = 0;
int *bit;
__builtin_memset(&data, 0xff, sizeof(data));
- bpf_for_each(bits, bit, &data[0], 513) /* Be greater than 512 */
+ bpf_for_each(bits, bit, &data[0], 512) /* Be greater than 511 */
nr++;
return nr;
}
@@ -151,3 +153,56 @@ int zero_words(void)
nr++;
return nr;
}
+
+SEC("syscall")
+__description("huge words")
+__success __retval(0)
+int huge_words(void)
+{
+ u64 data[8] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1};
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data[0], 67108865)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("max words")
+__success __retval(4)
+int max_words(void)
+{
+ volatile int nr = 0;
+ int *bit;
+
+ bits_array[0] = (1ULL << 63) | 1U;
+ bits_array[510] = (1ULL << 33) | (1ULL << 32);
+
+ bpf_for_each(bits, bit, bits_array, 511) {
+ if (nr == 0 && *bit != 0)
+ break;
+ if (nr == 2 && *bit != 32672)
+ break;
+ nr++;
+ }
+ return nr;
+}
+
+SEC("syscall")
+__description("bad words")
+__success __retval(0)
+int bad_words(void)
+{
+ void *bad_addr = (void *)(3UL << 30);
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, bad_addr, 1)
+ nr++;
+
+ bpf_for_each(bits, bit, bad_addr, 4)
+ nr++;
+
+ return nr;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
index 9da97d2efcd9..5094c288cfd7 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
@@ -790,61 +790,6 @@ __naked static void cumulative_stack_depth_subprog(void)
:: __imm(bpf_get_smp_processor_id) : __clobber_all);
}
-SEC("raw_tp")
-__arch_x86_64
-__log_level(4)
-__msg("stack depth 512")
-__xlated("0: r1 = 42")
-__xlated("1: *(u64 *)(r10 -512) = r1")
-__xlated("2: w0 = ")
-__xlated("3: r0 = &(void __percpu *)(r0)")
-__xlated("4: r0 = *(u32 *)(r0 +0)")
-__xlated("5: exit")
-__success
-__naked int bpf_fastcall_max_stack_ok(void)
-{
- asm volatile(
- "r1 = 42;"
- "*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
- "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
- "call %[bpf_get_smp_processor_id];"
- "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
- "exit;"
- :
- : __imm_const(max_bpf_stack, MAX_BPF_STACK),
- __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
- __imm(bpf_get_smp_processor_id)
- : __clobber_all
- );
-}
-
-SEC("raw_tp")
-__arch_x86_64
-__log_level(4)
-__msg("stack depth 520")
-__failure
-__naked int bpf_fastcall_max_stack_fail(void)
-{
- asm volatile(
- "r1 = 42;"
- "*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
- "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
- "call %[bpf_get_smp_processor_id];"
- "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
- /* call to prandom blocks bpf_fastcall rewrite */
- "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
- "call %[bpf_get_prandom_u32];"
- "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
- "exit;"
- :
- : __imm_const(max_bpf_stack, MAX_BPF_STACK),
- __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
- __imm(bpf_get_smp_processor_id),
- __imm(bpf_get_prandom_u32)
- : __clobber_all
- );
-}
-
SEC("cgroup/getsockname_unix")
__xlated("0: r2 = 1")
/* bpf_cast_to_kern_ctx is replaced by a single assignment */
diff --git a/tools/testing/selftests/bpf/progs/verifier_const.c b/tools/testing/selftests/bpf/progs/verifier_const.c
index 2e533d7eec2f..e118dbb768bf 100644
--- a/tools/testing/selftests/bpf/progs/verifier_const.c
+++ b/tools/testing/selftests/bpf/progs/verifier_const.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Isovalent */
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
const volatile long foo = 42;
@@ -66,4 +67,32 @@ int tcx6(struct __sk_buff *skb)
return TCX_PASS;
}
+static inline void write_fixed(volatile void *p, __u32 val)
+{
+ *(volatile __u32 *)p = val;
+}
+
+static inline void write_dyn(void *p, void *val, int len)
+{
+ bpf_copy_from_user(p, len, val);
+}
+
+SEC("tc/ingress")
+__description("rodata/mark: write with unknown reg rejected")
+__failure __msg("write into map forbidden")
+int tcx7(struct __sk_buff *skb)
+{
+ write_fixed((void *)&foo, skb->mark);
+ return TCX_PASS;
+}
+
+SEC("lsm.s/bprm_committed_creds")
+__description("rodata/mark: write with unknown reg rejected")
+__failure __msg("write into map forbidden")
+int BPF_PROG(bprm, struct linux_binprm *bprm)
+{
+ write_dyn((void *)&foo, &bart, bpf_get_prandom_u32() & 3);
+ return 0;
+}
+
char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c b/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
new file mode 100644
index 000000000000..8f755d2464cf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("scalars: find linked scalars")
+__failure
+__msg("math between fp pointer and 2147483647 is not allowed")
+__naked void scalars(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r1 = 0x80000001 ll; \
+ r1 /= 1; \
+ r2 = r1; \
+ r4 = r1; \
+ w2 += 0x7FFFFFFF; \
+ w4 += 0; \
+ if r2 == 0 goto l1; \
+ exit; \
+l1: \
+ r4 >>= 63; \
+ r3 = 1; \
+ r3 -= r4; \
+ r3 *= 0x7FFFFFFF; \
+ r3 += r10; \
+ *(u8*)(r3 - 1) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index 028ec855587b..994bbc346d25 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -287,6 +287,46 @@ l0_%=: \
: __clobber_all);
}
+SEC("socket")
+__description("MOV64SX, S8, unsigned range_check")
+__success __retval(0)
+__naked void mov64sx_s8_range_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x1; \
+ r0 += 0xfe; \
+ r0 = (s8)r0; \
+ if r0 < 0xfffffffffffffffe goto label_%=; \
+ r0 = 0; \
+ exit; \
+label_%=: \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, unsigned range_check")
+__success __retval(0)
+__naked void mov32sx_s8_range_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ w0 &= 0x1; \
+ w0 += 0xfe; \
+ w0 = (s8)w0; \
+ if w0 < 0xfffffffe goto label_%=; \
+ r0 = 0; \
+ exit; \
+label_%=: \
+ exit; \
+ " :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
#else
SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_mtu.c b/tools/testing/selftests/bpf/progs/verifier_mtu.c
new file mode 100644
index 000000000000..70c7600a26a0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_mtu.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("tc/ingress")
+__description("uninit/mtu: write rejected")
+__failure __msg("invalid indirect read from stack")
+int tc_uninit_mtu(struct __sk_buff *ctx)
+{
+ __u32 mtu;
+
+ bpf_check_mtu(ctx, 0, &mtu, 0, 0);
+ return TCX_PASS;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
index 2ecf77b623e0..7c5e5e6d10eb 100644
--- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
@@ -760,4 +760,71 @@ __naked void two_old_ids_one_cur_id(void)
: __clobber_all);
}
+SEC("socket")
+/* Note the flag, see verifier.c:opt_subreg_zext_lo32_rnd_hi32() */
+__flag(BPF_F_TEST_RND_HI32)
+__success
+/* This test was added because of a bug in verifier.c:sync_linked_regs():
+ * upon range propagation it destroyed subreg_def marks for registers.
+ * The subreg_def mark is used to decide whether zero extension instructions
+ * are needed when a register is read. When BPF_F_TEST_RND_HI32 is set, it
+ * also causes generation of statements to randomize upper halves of
+ * read registers.
+ *
+ * The test is written in a way to return an upper half of a register
+ * that is affected by range propagation and must have its subreg_def
+ * preserved. This gives a return value of 0 and leads to an undefined
+ * return value if the subreg_def mark is not preserved.
+ */
+__retval(0)
+/* Check that verifier believes r1/r0 are zero at exit */
+__log_level(2)
+__msg("4: (77) r1 >>= 32 ; R1_w=0")
+__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
+__msg("6: (95) exit")
+__msg("from 3 to 4")
+__msg("4: (77) r1 >>= 32 ; R1_w=0")
+__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
+__msg("6: (95) exit")
+/* Verify that statements to randomize the upper half of r1 have not been
+ * generated.
+ */
+__xlated("call unknown")
+__xlated("r0 &= 2147483647")
+__xlated("w1 = w0")
+/* This is how disasm.c prints BPF_ZEXT_REG at the moment; x86 and arm64
+ * are the only CI archs that do not need zero extension for subregs.
+ */
+#if !defined(__TARGET_ARCH_x86) && !defined(__TARGET_ARCH_arm64)
+__xlated("w1 = w1")
+#endif
+__xlated("if w0 < 0xa goto pc+0")
+__xlated("r1 >>= 32")
+__xlated("r0 = r1")
+__xlated("exit")
+__naked void linked_regs_and_subreg_def(void)
+{
+ asm volatile (
+ "call %[bpf_ktime_get_ns];"
+ /* make sure r0 is in 32-bit range, otherwise w1 = w0 won't
+ * assign same IDs to registers.
+ */
+ "r0 &= 0x7fffffff;"
+ /* link w1 and w0 via ID */
+ "w1 = w0;"
+ /* 'if' statement propagates range info from w0 to w1,
+ * but should not affect w1->subreg_def property.
+ */
+ "if w0 < 10 goto +0;"
+ /* r1 is read here; on archs that require subreg zero
+ * extension, this would cause zext patch generation.
+ */
+ "r1 >>= 32;"
+ "r0 = r1;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_search_pruning.c b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
index 5a14498d352f..f40e57251e94 100644
--- a/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
+++ b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
@@ -2,6 +2,7 @@
/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */
#include <linux/bpf.h>
+#include <../../../include/linux/filter.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
@@ -336,4 +337,26 @@ l0_%=: r1 = 42; \
: __clobber_all);
}
+/* Without a checkpoint forcibly inserted at the back-edge of a loop, this
+ * test would take a very long time to verify.
+ */
+SEC("kprobe")
+__failure __log_level(4)
+__msg("BPF program is too large.")
+__naked void short_loop1(void)
+{
+ asm volatile (
+ " r7 = *(u16 *)(r1 +0);"
+ "1: r7 += 0x1ab064b9;"
+ " .8byte %[jset];" /* same as 'if r7 & 0x702000 goto 1b;' */
+ " r7 &= 0x1ee60e;"
+ " r7 += r1;"
+ " if r7 s> 0x37d2 goto +0;"
+ " r0 = 0;"
+ " exit;"
+ :
+ : __imm_insn(jset, BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x702000, -2))
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index d3c3c3a24150..5e9f16683be5 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -367,7 +367,7 @@ int delete_module(const char *name, int flags)
return syscall(__NR_delete_module, name, flags);
}
-int unload_bpf_testmod(bool verbose)
+int unload_module(const char *name, bool verbose)
{
int ret, cnt = 0;
@@ -375,11 +375,11 @@ int unload_bpf_testmod(bool verbose)
fprintf(stdout, "Failed to trigger kernel-side RCU sync!\n");
for (;;) {
- ret = delete_module("bpf_testmod", 0);
+ ret = delete_module(name, 0);
if (!ret || errno != EAGAIN)
break;
if (++cnt > 10000) {
- fprintf(stdout, "Unload of bpf_testmod timed out\n");
+ fprintf(stdout, "Unload of %s timed out\n", name);
break;
}
usleep(100);
@@ -388,41 +388,51 @@ int unload_bpf_testmod(bool verbose)
if (ret) {
if (errno == ENOENT) {
if (verbose)
- fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
+ fprintf(stdout, "%s.ko is already unloaded.\n", name);
return -1;
}
- fprintf(stdout, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
+ fprintf(stdout, "Failed to unload %s.ko from kernel: %d\n", name, -errno);
return -1;
}
if (verbose)
- fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
+ fprintf(stdout, "Successfully unloaded %s.ko.\n", name);
return 0;
}
-int load_bpf_testmod(bool verbose)
+int load_module(const char *path, bool verbose)
{
int fd;
if (verbose)
- fprintf(stdout, "Loading bpf_testmod.ko...\n");
+ fprintf(stdout, "Loading %s...\n", path);
- fd = open("bpf_testmod.ko", O_RDONLY);
+ fd = open(path, O_RDONLY);
if (fd < 0) {
- fprintf(stdout, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
+ fprintf(stdout, "Can't find %s kernel module: %d\n", path, -errno);
return -ENOENT;
}
if (finit_module(fd, "", 0)) {
- fprintf(stdout, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
+ fprintf(stdout, "Failed to load %s into the kernel: %d\n", path, -errno);
close(fd);
return -EINVAL;
}
close(fd);
if (verbose)
- fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
+ fprintf(stdout, "Successfully loaded %s.\n", path);
return 0;
}
+int unload_bpf_testmod(bool verbose)
+{
+ return unload_module("bpf_testmod", verbose);
+}
+
+int load_bpf_testmod(bool verbose)
+{
+ return load_module("bpf_testmod.ko", verbose);
+}
+
/*
* Trigger synchronize_rcu() in kernel.
*/
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index d55f6ab12433..46d7f7089f63 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -38,6 +38,8 @@ int unload_bpf_testmod(bool verbose);
int kern_sync_rcu(void);
int finit_module(int fd, const char *param_values, int flags);
int delete_module(const char *name, int flags);
+int load_module(const char *path, bool verbose);
+int unload_module(const char *name, bool verbose);
static inline __u64 get_time_ns(void)
{
diff --git a/tools/testing/selftests/bpf/veristat.cfg b/tools/testing/selftests/bpf/veristat.cfg
index 1a385061618d..e661ffdcaadf 100644
--- a/tools/testing/selftests/bpf/veristat.cfg
+++ b/tools/testing/selftests/bpf/veristat.cfg
@@ -15,3 +15,4 @@ test_usdt*
test_verif_scale*
test_xdp_noinline*
xdp_synproxy*
+verifier_search_pruning*
diff --git a/tools/testing/selftests/hid/Makefile b/tools/testing/selftests/hid/Makefile
index 38ae31bb07b5..662209f5fabc 100644
--- a/tools/testing/selftests/hid/Makefile
+++ b/tools/testing/selftests/hid/Makefile
@@ -18,6 +18,7 @@ TEST_PROGS += hid-usb_crash.sh
TEST_PROGS += hid-wacom.sh
TEST_FILES := run-hid-tools-tests.sh
+TEST_FILES += tests
CXX ?= $(CROSS_COMPILE)g++
diff --git a/tools/testing/selftests/intel_pstate/run.sh b/tools/testing/selftests/intel_pstate/run.sh
index e7008f614ad7..6a3b8503264e 100755
--- a/tools/testing/selftests/intel_pstate/run.sh
+++ b/tools/testing/selftests/intel_pstate/run.sh
@@ -44,6 +44,11 @@ if [ $UID != 0 ] && [ $EVALUATE_ONLY == 0 ]; then
exit $ksft_skip
fi
+if ! command -v cpupower &> /dev/null; then
+ echo $msg cpupower could not be found, please install it >&2
+ exit $ksft_skip
+fi
+
max_cpus=$(($(nproc)-1))
function run_test () {
@@ -87,9 +92,9 @@ mkt_freq=${_mkt_freq}0
# Get the ranges from cpupower
_min_freq=$(cpupower frequency-info -l | tail -1 | awk ' { print $1 } ')
-min_freq=$(($_min_freq / 1000))
+min_freq=$((_min_freq / 1000))
_max_freq=$(cpupower frequency-info -l | tail -1 | awk ' { print $2 } ')
-max_freq=$(($_max_freq / 1000))
+max_freq=$((_max_freq / 1000))
[ $EVALUATE_ONLY -eq 0 ] && for freq in `seq $max_freq -100 $min_freq`
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 960cf6a77198..156fbfae940f 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -248,6 +248,9 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
ifeq ($(ARCH),s390)
CFLAGS += -march=z10
endif
+ifeq ($(ARCH),x86)
+ CFLAGS += -march=x86-64-v2
+endif
ifeq ($(ARCH),arm64)
tools_dir := $(top_srcdir)/tools
arm64_tools_dir := $(tools_dir)/arch/arm64/tools/
diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
index 2a3fe7914b72..b87e53580bfc 100644
--- a/tools/testing/selftests/kvm/aarch64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
@@ -68,6 +68,8 @@ struct test_feature_reg {
}
static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
+ S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DoubleLock, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, WRPs, 0),
S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, ID_AA64DFR0_EL1_DebugVer_IMP),
REG_FTR_END,
@@ -134,6 +136,13 @@ static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
REG_FTR_END,
};
+static const struct reg_ftr_bits ftr_id_aa64pfr1_el1[] = {
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, CSV2_frac, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, SSBS, ID_AA64PFR1_EL1_SSBS_NI),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, BT, 0),
+ REG_FTR_END,
+};
+
static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
@@ -200,6 +209,7 @@ static struct test_feature_reg test_regs[] = {
TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
+ TEST_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1_el1),
TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
@@ -569,9 +579,9 @@ int main(void)
test_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
- ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + ARRAY_SIZE(ftr_id_aa64mmfr1_el1) +
- ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) -
- ARRAY_SIZE(test_regs) + 2;
+ ARRAY_SIZE(ftr_id_aa64pfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) +
+ ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) +
+ ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 2;
ksft_set_plan(test_cnt);
diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
index 8c579ce714e9..fec03b11b059 100644
--- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
@@ -60,7 +60,7 @@ static bool is_cpuid_mangled(const struct kvm_cpuid_entry2 *entrie)
{
int i;
- for (i = 0; i < sizeof(mangled_cpuids); i++) {
+ for (i = 0; i < ARRAY_SIZE(mangled_cpuids); i++) {
if (mangled_cpuids[i].function == entrie->function &&
mangled_cpuids[i].index == entrie->index)
return true;
diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c
index 56d4480e8d3c..8a4d34cce36b 100644
--- a/tools/testing/selftests/mm/khugepaged.c
+++ b/tools/testing/selftests/mm/khugepaged.c
@@ -1091,7 +1091,7 @@ static void usage(void)
fprintf(stderr, "\n\t\"file,all\" mem_type requires kernel built with\n");
fprintf(stderr, "\tCONFIG_READ_ONLY_THP_FOR_FS=y\n");
fprintf(stderr, "\n\tif [dir] is a (sub)directory of a tmpfs mount, tmpfs must be\n");
- fprintf(stderr, "\tmounted with huge=madvise option for khugepaged tests to work\n");
+ fprintf(stderr, "\tmounted with huge=advise option for khugepaged tests to work\n");
fprintf(stderr, "\n\tSupported Options:\n");
fprintf(stderr, "\t\t-h: This help message.\n");
fprintf(stderr, "\t\t-s: mTHP size, expressed as page order.\n");
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index b3d21eed203d..a2e71b1636e7 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -241,6 +241,8 @@ static void *fork_event_consumer(void *data)
fork_event_args *args = data;
struct uffd_msg msg = { 0 };
+ ready_for_fork = true;
+
/* Read until a full msg received */
while (uffd_read_msg(args->parent_uffd, &msg));
@@ -308,8 +310,11 @@ static int pagemap_test_fork(int uffd, bool with_event, bool test_pin)
/* Prepare a thread to resolve EVENT_FORK */
if (with_event) {
+ ready_for_fork = false;
if (pthread_create(&thread, NULL, fork_event_consumer, &args))
err("pthread_create()");
+ while (!ready_for_fork)
+ ; /* Wait for fork_event_consumer() to start executing before forking */
}
child = fork();
diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
index c6a8c732b802..68801e1a9ec2 100644
--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
@@ -1414,6 +1414,13 @@ TEST_F(mount_setattr_idmapped, idmap_mount_tree_invalid)
ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/b", 0, 0, 0), 0);
ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/BB/b", 0, 0, 0), 0);
+ ASSERT_EQ(mount("testing", "/mnt/A", "ramfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mkdir("/mnt/A/AA", 0777), 0);
+
+ ASSERT_EQ(mount("/tmp", "/mnt/A/AA", NULL, MS_BIND | MS_REC, NULL), 0);
+
open_tree_fd = sys_open_tree(-EBADF, "/mnt/A",
AT_RECURSIVE |
AT_EMPTY_PATH |
@@ -1433,6 +1440,8 @@ TEST_F(mount_setattr_idmapped, idmap_mount_tree_invalid)
ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/BB/b", 0, 0, 0), 0);
ASSERT_EQ(expected_uid_gid(open_tree_fd, "B/b", 0, 0, 0), 0);
ASSERT_EQ(expected_uid_gid(open_tree_fd, "B/BB/b", 0, 0, 0), 0);
+
+ (void)umount2("/mnt/A", MNT_DETACH);
}
TEST_F(mount_setattr, mount_attr_nosymfollow)
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_flat.sh b/tools/testing/selftests/net/forwarding/ip6gre_flat.sh
index 96c97064f2d3..becc7c3fc809 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_flat.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_flat.sh
@@ -8,6 +8,7 @@
ALL_TESTS="
gre_flat
gre_mtu_change
+ gre_flat_remote_change
"
NUM_NETIFS=6
@@ -44,6 +45,19 @@ gre_mtu_change()
test_mtu_change
}
+gre_flat_remote_change()
+{
+ flat_remote_change
+
+ test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6 (new remote)"
+ test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6 (new remote)"
+
+ flat_remote_restore
+
+ test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6 (old remote)"
+ test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6 (old remote)"
+}
+
cleanup()
{
pre_cleanup
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh b/tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh
index ff9fb0db9bd1..e5335116a2fd 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh
@@ -8,6 +8,7 @@
ALL_TESTS="
gre_flat
gre_mtu_change
+ gre_flat_remote_change
"
NUM_NETIFS=6
@@ -44,6 +45,19 @@ gre_mtu_change()
test_mtu_change
}
+gre_flat_remote_change()
+{
+ flat_remote_change
+
+ test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6 with key (new remote)"
+ test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6 with key (new remote)"
+
+ flat_remote_restore
+
+ test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6 with key (old remote)"
+ test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6 with key (old remote)"
+}
+
cleanup()
{
pre_cleanup
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh b/tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh
index 12c138785242..7e0cbfdefab0 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh
@@ -8,6 +8,7 @@
ALL_TESTS="
gre_flat
gre_mtu_change
+ gre_flat_remote_change
"
NUM_NETIFS=6
@@ -44,6 +45,19 @@ gre_mtu_change()
test_mtu_change gre
}
+gre_flat_remote_change()
+{
+ flat_remote_change
+
+ test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6 with ikey/okey (new remote)"
+ test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6 with ikey/okey (new remote)"
+
+ flat_remote_restore
+
+ test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6 with ikey/okey (old remote)"
+ test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6 with ikey/okey (old remote)"
+}
+
cleanup()
{
pre_cleanup
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_hier.sh b/tools/testing/selftests/net/forwarding/ip6gre_hier.sh
index 83b55c30a5c3..e0844495f3d1 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_hier.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_hier.sh
@@ -8,6 +8,7 @@
ALL_TESTS="
gre_hier
gre_mtu_change
+ gre_hier_remote_change
"
NUM_NETIFS=6
@@ -44,6 +45,19 @@ gre_mtu_change()
test_mtu_change gre
}
+gre_hier_remote_change()
+{
+ hier_remote_change
+
+ test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6 (new remote)"
+ test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6 (new remote)"
+
+ hier_remote_restore
+
+ test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6 (old remote)"
+ test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6 (old remote)"
+}
+
cleanup()
{
pre_cleanup
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh b/tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh
index 256607916d92..741bc9c928eb 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh
@@ -8,6 +8,7 @@
ALL_TESTS="
gre_hier
gre_mtu_change
+ gre_hier_remote_change
"
NUM_NETIFS=6
@@ -44,6 +45,19 @@ gre_mtu_change()
test_mtu_change gre
}
+gre_hier_remote_change()
+{
+ hier_remote_change
+
+ test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6 with key (new remote)"
+ test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6 with key (new remote)"
+
+ hier_remote_restore
+
+ test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6 with key (old remote)"
+ test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6 with key (old remote)"
+}
+
cleanup()
{
pre_cleanup
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh b/tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh
index ad1bcd6334a8..ad9eab4b1367 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh
@@ -8,6 +8,7 @@
ALL_TESTS="
gre_hier
gre_mtu_change
+ gre_hier_remote_change
"
NUM_NETIFS=6
@@ -44,6 +45,19 @@ gre_mtu_change()
test_mtu_change gre
}
+gre_hier_remote_change()
+{
+ hier_remote_change
+
+ test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6 with ikey/okey (new remote)"
+ test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6 with ikey/okey (new remote)"
+
+ hier_remote_restore
+
+ test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6 with ikey/okey (old remote)"
+ test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6 with ikey/okey (old remote)"
+}
+
cleanup()
{
pre_cleanup
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_lib.sh b/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
index 24f4ab328bd2..2d91281dc5b7 100644
--- a/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
@@ -436,3 +436,83 @@ test_mtu_change()
check_err $?
log_test "ping GRE IPv6, packet size 1800 after MTU change"
}
+
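+# Re-point both flat tunnels from the old local/remote addresses to the new
+# ones and update the corresponding /128 routes.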
+topo_flat_remote_change()
+{
+ local old1=$1; shift
+ local new1=$1; shift
+ local old2=$1; shift
+ local new2=$1; shift
+
+ ip link set dev g1a type ip6gre local $new1 remote $new2
+ __addr_add_del g1a add "$new1/128"
+ __addr_add_del g1a del "$old1/128"
+ ip -6 route add $new2/128 via 2001:db8:10::2
+ ip -6 route del $old2/128
+
+ ip link set dev g2a type ip6gre local $new2 remote $new1
+ __addr_add_del g2a add "$new2/128"
+ __addr_add_del g2a del "$old2/128"
+ ip -6 route add vrf v$ol2 $new1/128 via 2001:db8:10::1
+ ip -6 route del vrf v$ol2 $old1/128
+}
+
+flat_remote_change()
+{
+ local old1=2001:db8:3::1
+ local new1=2001:db8:3::10
+ local old2=2001:db8:3::2
+ local new2=2001:db8:3::20
+
+ topo_flat_remote_change $old1 $new1 $old2 $new2
+}
+
+flat_remote_restore()
+{
+ local old1=2001:db8:3::10
+ local new1=2001:db8:3::1
+ local old2=2001:db8:3::20
+ local new2=2001:db8:3::2
+
+ topo_flat_remote_change $old1 $new1 $old2 $new2
+}
+
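+# Same idea for the hierarchical topology: move the tunnel endpoints between
+# the dummy device addresses and fix up the VRF routes.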
+topo_hier_remote_change()
+{
+ local old1=$1; shift
+ local new1=$1; shift
+ local old2=$1; shift
+ local new2=$1; shift
+
+ __addr_add_del dummy1 del "$old1/64"
+ __addr_add_del dummy1 add "$new1/64"
+ ip link set dev g1a type ip6gre local $new1 remote $new2
+ ip -6 route add vrf v$ul1 $new2/128 via 2001:db8:10::2
+ ip -6 route del vrf v$ul1 $old2/128
+
+ __addr_add_del dummy2 del "$old2/64"
+ __addr_add_del dummy2 add "$new2/64"
+ ip link set dev g2a type ip6gre local $new2 remote $new1
+ ip -6 route add vrf v$ul2 $new1/128 via 2001:db8:10::1
+ ip -6 route del vrf v$ul2 $old1/128
+}
+
+hier_remote_change()
+{
+ local old1=2001:db8:3::1
+ local new1=2001:db8:3::10
+ local old2=2001:db8:3::2
+ local new2=2001:db8:3::20
+
+ topo_hier_remote_change $old1 $new1 $old2 $new2
+}
+
+hier_remote_restore()
+{
+ local old1=2001:db8:3::10
+ local new1=2001:db8:3::1
+ local old2=2001:db8:3::20
+ local new2=2001:db8:3::2
+
+ topo_hier_remote_change $old1 $new1 $old2 $new2
+}
diff --git a/tools/testing/selftests/net/lib/py/nsim.py b/tools/testing/selftests/net/lib/py/nsim.py
index f571a8b3139b..1a8cbe9acc48 100644
--- a/tools/testing/selftests/net/lib/py/nsim.py
+++ b/tools/testing/selftests/net/lib/py/nsim.py
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+import errno
import json
import os
import random
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 57325d57e4c6..b48b4e56826a 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -259,6 +259,15 @@ check_mptcp_disabled()
mptcp_lib_ns_init disabled_ns
print_larger_title "New MPTCP socket can be blocked via sysctl"
+
+ # mainly to cover more code
+ if ! ip netns exec ${disabled_ns} sysctl net.mptcp >/dev/null; then
+ mptcp_lib_pr_fail "not able to list net.mptcp sysctl knobs"
+ mptcp_lib_result_fail "not able to list net.mptcp sysctl knobs"
+ ret=${KSFT_FAIL}
+ return 1
+ fi
+
# net.mptcp.enabled should be enabled by default
if [ "$(ip netns exec ${disabled_ns} sysctl net.mptcp.enabled | awk '{ print $3 }')" -ne 1 ]; then
mptcp_lib_pr_fail "net.mptcp.enabled sysctl is not 1 by default"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index e8d0a01b4144..c07e2bd3a315 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -23,6 +23,7 @@ tmpfile=""
cout=""
err=""
capout=""
+cappid=""
ns1=""
ns2=""
iptables="iptables"
@@ -887,40 +888,62 @@ check_cestab()
fi
}
-do_transfer()
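+# Start a tcpdump capture in the given namespace when packet capture is enabled.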
+cond_start_capture()
{
- local listener_ns="$1"
- local connector_ns="$2"
- local cl_proto="$3"
- local srv_proto="$4"
- local connect_addr="$5"
-
- local port=$((10000 + MPTCP_LIB_TEST_COUNTER - 1))
- local cappid
- local FAILING_LINKS=${FAILING_LINKS:-""}
- local fastclose=${fastclose:-""}
- local speed=${speed:-"fast"}
+ local ns="$1"
- :> "$cout"
- :> "$sout"
:> "$capout"
if $capture; then
- local capuser
- if [ -z $SUDO_USER ] ; then
+ local capuser capfile
+ if [ -z $SUDO_USER ]; then
capuser=""
else
capuser="-Z $SUDO_USER"
fi
- capfile=$(printf "mp_join-%02u-%s.pcap" "$MPTCP_LIB_TEST_COUNTER" "${listener_ns}")
+ capfile=$(printf "mp_join-%02u-%s.pcap" "$MPTCP_LIB_TEST_COUNTER" "$ns")
echo "Capturing traffic for test $MPTCP_LIB_TEST_COUNTER into $capfile"
- ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
+ ip netns exec "$ns" tcpdump -i any -s 65535 -B 32768 $capuser -w "$capfile" > "$capout" 2>&1 &
cappid=$!
sleep 1
fi
+}
+
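+# Stop the capture started by cond_start_capture() and dump its output.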
+cond_stop_capture()
+{
+ if $capture; then
+ sleep 1
+ kill $cappid
+ cat "$capout"
+ fi
+}
+
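+# Each test uses a unique port derived from the test counter.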
+get_port()
+{
+ echo "$((10000 + MPTCP_LIB_TEST_COUNTER - 1))"
+}
+
+do_transfer()
+{
+ local listener_ns="$1"
+ local connector_ns="$2"
+ local cl_proto="$3"
+ local srv_proto="$4"
+ local connect_addr="$5"
+ local port
+
+ local FAILING_LINKS=${FAILING_LINKS:-""}
+ local fastclose=${fastclose:-""}
+ local speed=${speed:-"fast"}
+ port=$(get_port)
+
+ :> "$cout"
+ :> "$sout"
+
+ cond_start_capture ${listener_ns}
NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
nstat -n
@@ -1007,10 +1030,7 @@ do_transfer()
wait $spid
local rets=$?
- if $capture; then
- sleep 1
- kill $cappid
- fi
+ cond_stop_capture
NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
nstat | grep Tcp > /tmp/${listener_ns}.out
@@ -1026,7 +1046,6 @@ do_transfer()
ip netns exec ${connector_ns} ss -Menita 1>&2 -o "dport = :$port"
cat /tmp/${connector_ns}.out
- cat "$capout"
return 1
fi
@@ -1043,13 +1062,7 @@ do_transfer()
fi
rets=$?
- if [ $retc -eq 0 ] && [ $rets -eq 0 ];then
- cat "$capout"
- return 0
- fi
-
- cat "$capout"
- return 1
+ [ $retc -eq 0 ] && [ $rets -eq 0 ]
}
make_file()
@@ -2873,6 +2886,32 @@ verify_listener_events()
fail_test
}
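+# Check that the MPC connect attempt towards the port-based signal endpoint
+# failed as expected, and that the expected number of
+# MPTcpExtMPCapableEndpAttempt events was counted on ns1.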
+chk_mpc_endp_attempt()
+{
+ local retl=$1
+ local attempts=$2
+
+ print_check "Connect"
+
+ if [ ${retl} = 124 ]; then
+ fail_test "timeout on connect"
+ elif [ ${retl} = 0 ]; then
+ fail_test "unexpected successful connect"
+ else
+ print_ok
+
+ print_check "Attempts"
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPCapableEndpAttempt")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$attempts" ]; then
+ fail_test "got ${count} MPC attempt[s] on port-based endpoint, expected ${attempts}"
+ else
+ print_ok
+ fi
+ fi
+}
+
add_addr_ports_tests()
{
# signal address with port
@@ -2963,6 +3002,22 @@ add_addr_ports_tests()
chk_join_nr 2 2 2
chk_add_nr 2 2 2
fi
+
+ if reset "port-based signal endpoint must not accept mpc"; then
+ local port retl count
+ port=$(get_port)
+
+ cond_start_capture ${ns1}
+ pm_nl_add_endpoint ${ns1} 10.0.2.1 flags signal port ${port}
+ mptcp_lib_wait_local_port_listen ${ns1} ${port}
+
+ timeout 1 ip netns exec ${ns2} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s MPTCP 10.0.2.1 >/dev/null 2>&1
+ retl=$?
+ cond_stop_capture
+
+ chk_mpc_endp_attempt ${retl} 1
+ fi
}
syncookies_tests()
diff --git a/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c b/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
index dc056fec993b..254ff03297f0 100644
--- a/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
+++ b/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
@@ -98,7 +98,7 @@ static int conntrack_data_insert(struct mnl_socket *sock, struct nlmsghdr *nlh,
char buf[MNL_SOCKET_BUFFER_SIZE];
struct nlmsghdr *rplnlh;
unsigned int portid;
- int err, ret;
+ int ret;
portid = mnl_socket_get_portid(sock);
@@ -217,7 +217,7 @@ static int conntracK_count_zone(struct mnl_socket *sock, uint16_t zone)
struct nfgenmsg *nfh;
struct nlattr *nest;
unsigned int portid;
- int err, ret;
+ int ret;
portid = mnl_socket_get_portid(sock);
@@ -264,7 +264,7 @@ static int conntrack_flush_zone(struct mnl_socket *sock, uint16_t zone)
struct nfgenmsg *nfh;
struct nlattr *nest;
unsigned int portid;
- int err, ret;
+ int ret;
portid = mnl_socket_get_portid(sock);
diff --git a/tools/testing/selftests/net/netfilter/nft_flowtable.sh b/tools/testing/selftests/net/netfilter/nft_flowtable.sh
index b3995550856a..a4ee5496f2a1 100755
--- a/tools/testing/selftests/net/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/net/netfilter/nft_flowtable.sh
@@ -71,6 +71,8 @@ omtu=9000
lmtu=1500
rmtu=2000
+filesize=$((2 * 1024 * 1024))
+
usage(){
echo "nft_flowtable.sh [OPTIONS]"
echo
@@ -81,12 +83,13 @@ usage(){
exit 1
}
-while getopts "o:l:r:" o
+while getopts "o:l:r:s:" o
do
case $o in
o) omtu=$OPTARG;;
l) lmtu=$OPTARG;;
r) rmtu=$OPTARG;;
+ s) filesize=$OPTARG;;
*) usage;;
esac
done
@@ -217,18 +220,10 @@ ns2out=$(mktemp)
make_file()
{
- name=$1
-
- SIZE=$((RANDOM % (1024 * 128)))
- SIZE=$((SIZE + (1024 * 8)))
- TSIZE=$((SIZE * 1024))
-
- dd if=/dev/urandom of="$name" bs=1024 count=$SIZE 2> /dev/null
+ name="$1"
+ sz="$2"
- SIZE=$((RANDOM % 1024))
- SIZE=$((SIZE + 128))
- TSIZE=$((TSIZE + SIZE))
- dd if=/dev/urandom conf=notrunc of="$name" bs=1 count=$SIZE 2> /dev/null
+ head -c "$sz" < /dev/urandom > "$name"
}
check_counters()
@@ -246,18 +241,18 @@ check_counters()
local fs
fs=$(du -sb "$nsin")
local max_orig=${fs%%/*}
- local max_repl=$((max_orig/4))
+ local max_repl=$((max_orig))
# flowtable fastpath should bypass normal routing one, i.e. the counters in forward hook
# should always be lower than the size of the transmitted file (max_orig).
if [ "$orig_cnt" -gt "$max_orig" ];then
- echo "FAIL: $what: original counter $orig_cnt exceeds expected value $max_orig" 1>&2
+ echo "FAIL: $what: original counter $orig_cnt exceeds expected value $max_orig, reply counter $repl_cnt" 1>&2
ret=1
ok=0
fi
if [ "$repl_cnt" -gt $max_repl ];then
- echo "FAIL: $what: reply counter $repl_cnt exceeds expected value $max_repl" 1>&2
+ echo "FAIL: $what: reply counter $repl_cnt exceeds expected value $max_repl, original counter $orig_cnt" 1>&2
ret=1
ok=0
fi
@@ -455,7 +450,7 @@ test_tcp_forwarding_nat()
return $lret
}
-make_file "$nsin"
+make_file "$nsin" "$filesize"
# First test:
# No PMTU discovery, nsr1 is expected to fragment packets from ns1 to ns2 as needed.
@@ -664,8 +659,16 @@ if [ "$1" = "" ]; then
l=$(((RANDOM%mtu) + low))
r=$(((RANDOM%mtu) + low))
- echo "re-run with random mtus: -o $o -l $l -r $r"
- $0 -o "$o" -l "$l" -r "$r"
+ MINSIZE=$((2 * 1000 * 1000))
+ MAXSIZE=$((64 * 1000 * 1000))
+
+ filesize=$(((RANDOM * RANDOM) % MAXSIZE))
+ if [ "$filesize" -lt "$MINSIZE" ]; then
+ filesize=$((filesize+MINSIZE))
+ fi
+
+ echo "re-run with random mtus and file size: -o $o -l $l -r $r -s $filesize"
+ $0 -o "$o" -l "$l" -r "$r" -s "$filesize"
fi
exit $ret
diff --git a/tools/testing/selftests/net/rds/test.py b/tools/testing/selftests/net/rds/test.py
index e6bb109bcead..4a7178d11193 100755
--- a/tools/testing/selftests/net/rds/test.py
+++ b/tools/testing/selftests/net/rds/test.py
@@ -14,8 +14,11 @@ import sys
import atexit
from pwd import getpwuid
from os import stat
-from lib.py import ip
+# Allow the utils module to be imported from a different directory
+this_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(this_dir, "../"))
+from lib.py.utils import ip
libc = ctypes.cdll.LoadLibrary('libc.so.6')
setns = libc.setns
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index 0754a2c110a1..011762224600 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -3,23 +3,12 @@
include ../../../build/Build.include
include ../../../scripts/Makefile.arch
include ../../../scripts/Makefile.include
-include ../lib.mk
-ifneq ($(LLVM),)
-ifneq ($(filter %/,$(LLVM)),)
-LLVM_PREFIX := $(LLVM)
-else ifneq ($(filter -%,$(LLVM)),)
-LLVM_SUFFIX := $(LLVM)
-endif
-
-CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as
-else
-CC := gcc
-endif # LLVM
+TEST_GEN_PROGS := runner
-ifneq ($(CROSS_COMPILE),)
-$(error CROSS_COMPILE not supported for scx selftests)
-endif # CROSS_COMPILE
+# override lib.mk's default rules
+OVERRIDE_TARGETS := 1
+include ../lib.mk
CURDIR := $(abspath .)
REPOROOT := $(abspath ../../../..)
@@ -34,18 +23,23 @@ GENHDR := $(GENDIR)/autoconf.h
SCXTOOLSDIR := $(TOOLSDIR)/sched_ext
SCXTOOLSINCDIR := $(TOOLSDIR)/sched_ext/include
-OUTPUT_DIR := $(CURDIR)/build
+OUTPUT_DIR := $(OUTPUT)/build
OBJ_DIR := $(OUTPUT_DIR)/obj
INCLUDE_DIR := $(OUTPUT_DIR)/include
BPFOBJ_DIR := $(OBJ_DIR)/libbpf
SCXOBJ_DIR := $(OBJ_DIR)/sched_ext
BPFOBJ := $(BPFOBJ_DIR)/libbpf.a
LIBBPF_OUTPUT := $(OBJ_DIR)/libbpf/libbpf.a
-DEFAULT_BPFTOOL := $(OUTPUT_DIR)/sbin/bpftool
-HOST_BUILD_DIR := $(OBJ_DIR)
-HOST_OUTPUT_DIR := $(OUTPUT_DIR)
-VMLINUX_BTF_PATHS ?= ../../../../vmlinux \
+DEFAULT_BPFTOOL := $(OUTPUT_DIR)/host/sbin/bpftool
+HOST_OBJ_DIR := $(OBJ_DIR)/host/bpftool
+HOST_LIBBPF_OUTPUT := $(OBJ_DIR)/host/libbpf/
+HOST_LIBBPF_DESTDIR := $(OUTPUT_DIR)/host/
+HOST_DESTDIR := $(OUTPUT_DIR)/host/
+
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../../vmlinux \
/sys/kernel/btf/vmlinux \
/boot/vmlinux-$(shell uname -r)
VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
@@ -80,17 +74,23 @@ IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
# Use '-idirafter': Don't interfere with include mechanics except where the
# build would have failed anyways.
define get_sys_includes
-$(shell $(1) -v -E - </dev/null 2>&1 \
+$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
-$(shell $(1) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
+$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
endef
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif
+
+CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
+
BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \
$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) \
-I$(CURDIR)/include -I$(CURDIR)/include/bpf-compat \
-I$(INCLUDE_DIR) -I$(APIDIR) -I$(SCXTOOLSINCDIR) \
-I$(REPOROOT)/include \
- $(call get_sys_includes,$(CLANG)) \
+ $(CLANG_SYS_INCLUDES) \
-Wall -Wno-compare-distinct-pointer-types \
-Wno-incompatible-function-pointer-types \
-O2 -mcpu=v3
@@ -98,7 +98,7 @@ BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \
# sort removes libbpf duplicates when not cross-building
MAKE_DIRS := $(sort $(OBJ_DIR)/libbpf $(OBJ_DIR)/libbpf \
$(OBJ_DIR)/bpftool $(OBJ_DIR)/resolve_btfids \
- $(INCLUDE_DIR) $(SCXOBJ_DIR))
+ $(HOST_OBJ_DIR) $(INCLUDE_DIR) $(SCXOBJ_DIR))
$(MAKE_DIRS):
$(call msg,MKDIR,,$@)
@@ -108,18 +108,19 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
| $(OBJ_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \
+ ARCH=$(ARCH) CC="$(CC)" CROSS_COMPILE=$(CROSS_COMPILE) \
EXTRA_CFLAGS='-g -O0 -fPIC' \
DESTDIR=$(OUTPUT_DIR) prefix= all install_headers
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
- $(LIBBPF_OUTPUT) | $(OBJ_DIR)/bpftool
+ $(LIBBPF_OUTPUT) | $(HOST_OBJ_DIR)
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \
EXTRA_CFLAGS='-g -O0' \
- OUTPUT=$(OBJ_DIR)/bpftool/ \
- LIBBPF_OUTPUT=$(OBJ_DIR)/libbpf/ \
- LIBBPF_DESTDIR=$(OUTPUT_DIR)/ \
- prefix= DESTDIR=$(OUTPUT_DIR)/ install-bin
+ OUTPUT=$(HOST_OBJ_DIR)/ \
+ LIBBPF_OUTPUT=$(HOST_LIBBPF_OUTPUT) \
+ LIBBPF_DESTDIR=$(HOST_LIBBPF_DESTDIR) \
+ prefix= DESTDIR=$(HOST_DESTDIR) install-bin
$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
ifeq ($(VMLINUX_H),)
@@ -150,9 +151,7 @@ $(INCLUDE_DIR)/%.bpf.skel.h: $(SCXOBJ_DIR)/%.bpf.o $(INCLUDE_DIR)/vmlinux.h $(BP
override define CLEAN
rm -rf $(OUTPUT_DIR)
- rm -f *.o *.bpf.o *.bpf.skel.h *.bpf.subskel.h
rm -f $(TEST_GEN_PROGS)
- rm -f runner
endef
 # Every testcase takes all of the BPF progs as dependencies by default. This
@@ -185,7 +184,7 @@ auto-test-targets := \
testcase-targets := $(addsuffix .o,$(addprefix $(SCXOBJ_DIR)/,$(auto-test-targets)))
-$(SCXOBJ_DIR)/runner.o: runner.c | $(SCXOBJ_DIR)
+$(SCXOBJ_DIR)/runner.o: runner.c | $(SCXOBJ_DIR) $(BPFOBJ)
$(CC) $(CFLAGS) -c $< -o $@
# Create all of the test targets object files, whose testcase objects will be
@@ -196,21 +195,15 @@ $(SCXOBJ_DIR)/runner.o: runner.c | $(SCXOBJ_DIR)
# function doesn't support using implicit rules otherwise.
$(testcase-targets): $(SCXOBJ_DIR)/%.o: %.c $(SCXOBJ_DIR)/runner.o $(all_test_bpfprogs) | $(SCXOBJ_DIR)
$(eval test=$(patsubst %.o,%.c,$(notdir $@)))
- $(CC) $(CFLAGS) -c $< -o $@ $(SCXOBJ_DIR)/runner.o
+ $(CC) $(CFLAGS) -c $< -o $@
$(SCXOBJ_DIR)/util.o: util.c | $(SCXOBJ_DIR)
$(CC) $(CFLAGS) -c $< -o $@
-runner: $(SCXOBJ_DIR)/runner.o $(SCXOBJ_DIR)/util.o $(BPFOBJ) $(testcase-targets)
+$(OUTPUT)/runner: $(SCXOBJ_DIR)/runner.o $(SCXOBJ_DIR)/util.o $(BPFOBJ) $(testcase-targets)
@echo "$(testcase-targets)"
$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
-TEST_GEN_PROGS := runner
-
-all: runner
-
-.PHONY: all clean help
-
.DEFAULT_GOAL := all
.DELETE_ON_ERROR:
diff --git a/tools/testing/selftests/sched_ext/create_dsq.bpf.c b/tools/testing/selftests/sched_ext/create_dsq.bpf.c
index 23f79ed343f0..2cfc4ffd60e2 100644
--- a/tools/testing/selftests/sched_ext/create_dsq.bpf.c
+++ b/tools/testing/selftests/sched_ext/create_dsq.bpf.c
@@ -51,8 +51,8 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(create_dsq_init)
SEC(".struct_ops.link")
struct sched_ext_ops create_dsq_ops = {
- .init_task = create_dsq_init_task,
- .exit_task = create_dsq_exit_task,
- .init = create_dsq_init,
+ .init_task = (void *) create_dsq_init_task,
+ .exit_task = (void *) create_dsq_exit_task,
+ .init = (void *) create_dsq_init,
.name = "create_dsq",
};
diff --git a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
index e97ad41d354a..37d9bf6fb745 100644
--- a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
@@ -35,8 +35,8 @@ void BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops ddsp_bogus_dsq_fail_ops = {
- .select_cpu = ddsp_bogus_dsq_fail_select_cpu,
- .exit = ddsp_bogus_dsq_fail_exit,
+ .select_cpu = (void *) ddsp_bogus_dsq_fail_select_cpu,
+ .exit = (void *) ddsp_bogus_dsq_fail_exit,
.name = "ddsp_bogus_dsq_fail",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
index dde7e7dafbfb..dffc97d9cdf1 100644
--- a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
@@ -32,8 +32,8 @@ void BPF_STRUCT_OPS(ddsp_vtimelocal_fail_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops ddsp_vtimelocal_fail_ops = {
- .select_cpu = ddsp_vtimelocal_fail_select_cpu,
- .exit = ddsp_vtimelocal_fail_exit,
+ .select_cpu = (void *) ddsp_vtimelocal_fail_select_cpu,
+ .exit = (void *) ddsp_vtimelocal_fail_exit,
.name = "ddsp_vtimelocal_fail",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
index efb4672decb4..6a7db1502c29 100644
--- a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
@@ -56,10 +56,10 @@ void BPF_STRUCT_OPS(dsp_local_on_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops dsp_local_on_ops = {
- .select_cpu = dsp_local_on_select_cpu,
- .enqueue = dsp_local_on_enqueue,
- .dispatch = dsp_local_on_dispatch,
- .exit = dsp_local_on_exit,
+ .select_cpu = (void *) dsp_local_on_select_cpu,
+ .enqueue = (void *) dsp_local_on_enqueue,
+ .dispatch = (void *) dsp_local_on_dispatch,
+ .exit = (void *) dsp_local_on_exit,
.name = "dsp_local_on",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
index b0b99531d5d5..e1bd13e48889 100644
--- a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
+++ b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
@@ -12,10 +12,18 @@
char _license[] SEC("license") = "GPL";
+u32 exit_kind;
+
+void BPF_STRUCT_OPS_SLEEPABLE(enq_last_no_enq_fails_exit, struct scx_exit_info *info)
+{
+ exit_kind = info->kind;
+}
+
SEC(".struct_ops.link")
struct sched_ext_ops enq_last_no_enq_fails_ops = {
.name = "enq_last_no_enq_fails",
/* Need to define ops.enqueue() with SCX_OPS_ENQ_LAST */
.flags = SCX_OPS_ENQ_LAST,
+ .exit = (void *) enq_last_no_enq_fails_exit,
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
index 2a3eda5e2c0b..73e679953e27 100644
--- a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
+++ b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
@@ -31,8 +31,12 @@ static enum scx_test_status run(void *ctx)
struct bpf_link *link;
link = bpf_map__attach_struct_ops(skel->maps.enq_last_no_enq_fails_ops);
- if (link) {
- SCX_ERR("Incorrectly succeeded in to attaching scheduler");
+ if (!link) {
+ SCX_ERR("Incorrectly failed at attaching scheduler");
+ return SCX_TEST_FAIL;
+ }
+ if (!skel->bss->exit_kind) {
+ SCX_ERR("Incorrectly stayed loaded");
return SCX_TEST_FAIL;
}
@@ -50,7 +54,7 @@ static void cleanup(void *ctx)
struct scx_test enq_last_no_enq_fails = {
.name = "enq_last_no_enq_fails",
- .description = "Verify we fail to load a scheduler if we specify "
+ .description = "Verify we eject a scheduler if we specify "
"the SCX_OPS_ENQ_LAST flag without defining "
"ops.enqueue()",
.setup = setup,
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
index b3dfc1033cd6..1efb50d61040 100644
--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
@@ -36,8 +36,8 @@ void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
SEC(".struct_ops.link")
struct sched_ext_ops enq_select_cpu_fails_ops = {
- .select_cpu = enq_select_cpu_fails_select_cpu,
- .enqueue = enq_select_cpu_fails_enqueue,
+ .select_cpu = (void *) enq_select_cpu_fails_select_cpu,
+ .enqueue = (void *) enq_select_cpu_fails_enqueue,
.name = "enq_select_cpu_fails",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/exit.bpf.c b/tools/testing/selftests/sched_ext/exit.bpf.c
index ae12ddaac921..d75d4faf07f6 100644
--- a/tools/testing/selftests/sched_ext/exit.bpf.c
+++ b/tools/testing/selftests/sched_ext/exit.bpf.c
@@ -15,6 +15,8 @@ UEI_DEFINE(uei);
#define EXIT_CLEANLY() scx_bpf_exit(exit_point, "%d", exit_point)
+#define DSQ_ID 0
+
s32 BPF_STRUCT_OPS(exit_select_cpu, struct task_struct *p,
s32 prev_cpu, u64 wake_flags)
{
@@ -31,7 +33,7 @@ void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
if (exit_point == EXIT_ENQUEUE)
EXIT_CLEANLY();
- scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
}
void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
@@ -39,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
if (exit_point == EXIT_DISPATCH)
EXIT_CLEANLY();
- scx_bpf_consume(SCX_DSQ_GLOBAL);
+ scx_bpf_consume(DSQ_ID);
}
void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
@@ -67,18 +69,18 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(exit_init)
if (exit_point == EXIT_INIT)
EXIT_CLEANLY();
- return 0;
+ return scx_bpf_create_dsq(DSQ_ID, -1);
}
SEC(".struct_ops.link")
struct sched_ext_ops exit_ops = {
- .select_cpu = exit_select_cpu,
- .enqueue = exit_enqueue,
- .dispatch = exit_dispatch,
- .init_task = exit_init_task,
- .enable = exit_enable,
- .exit = exit_exit,
- .init = exit_init,
+ .select_cpu = (void *) exit_select_cpu,
+ .enqueue = (void *) exit_enqueue,
+ .dispatch = (void *) exit_dispatch,
+ .init_task = (void *) exit_init_task,
+ .enable = (void *) exit_enable,
+ .exit = (void *) exit_exit,
+ .init = (void *) exit_init,
.name = "exit",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/hotplug.bpf.c b/tools/testing/selftests/sched_ext/hotplug.bpf.c
index 8f2601db39f3..6c9f25c9bf53 100644
--- a/tools/testing/selftests/sched_ext/hotplug.bpf.c
+++ b/tools/testing/selftests/sched_ext/hotplug.bpf.c
@@ -46,16 +46,16 @@ void BPF_STRUCT_OPS_SLEEPABLE(hotplug_cpu_offline, s32 cpu)
SEC(".struct_ops.link")
struct sched_ext_ops hotplug_cb_ops = {
- .cpu_online = hotplug_cpu_online,
- .cpu_offline = hotplug_cpu_offline,
- .exit = hotplug_exit,
+ .cpu_online = (void *) hotplug_cpu_online,
+ .cpu_offline = (void *) hotplug_cpu_offline,
+ .exit = (void *) hotplug_exit,
.name = "hotplug_cbs",
.timeout_ms = 1000U,
};
SEC(".struct_ops.link")
struct sched_ext_ops hotplug_nocb_ops = {
- .exit = hotplug_exit,
+ .exit = (void *) hotplug_exit,
.name = "hotplug_nocbs",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/init_enable_count.bpf.c b/tools/testing/selftests/sched_ext/init_enable_count.bpf.c
index 47ea89a626c3..5eb9edb1837d 100644
--- a/tools/testing/selftests/sched_ext/init_enable_count.bpf.c
+++ b/tools/testing/selftests/sched_ext/init_enable_count.bpf.c
@@ -45,9 +45,9 @@ void BPF_STRUCT_OPS(cnt_disable, struct task_struct *p)
SEC(".struct_ops.link")
struct sched_ext_ops init_enable_count_ops = {
- .init_task = cnt_init_task,
- .exit_task = cnt_exit_task,
- .enable = cnt_enable,
- .disable = cnt_disable,
+ .init_task = (void *) cnt_init_task,
+ .exit_task = (void *) cnt_exit_task,
+ .enable = (void *) cnt_enable,
+ .disable = (void *) cnt_disable,
.name = "init_enable_count",
};
diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
index 00bfa9cb95d3..4d4cd8d966db 100644
--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -131,34 +131,34 @@ void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info)
SEC(".struct_ops.link")
struct sched_ext_ops maximal_ops = {
- .select_cpu = maximal_select_cpu,
- .enqueue = maximal_enqueue,
- .dequeue = maximal_dequeue,
- .dispatch = maximal_dispatch,
- .runnable = maximal_runnable,
- .running = maximal_running,
- .stopping = maximal_stopping,
- .quiescent = maximal_quiescent,
- .yield = maximal_yield,
- .core_sched_before = maximal_core_sched_before,
- .set_weight = maximal_set_weight,
- .set_cpumask = maximal_set_cpumask,
- .update_idle = maximal_update_idle,
- .cpu_acquire = maximal_cpu_acquire,
- .cpu_release = maximal_cpu_release,
- .cpu_online = maximal_cpu_online,
- .cpu_offline = maximal_cpu_offline,
- .init_task = maximal_init_task,
- .enable = maximal_enable,
- .exit_task = maximal_exit_task,
- .disable = maximal_disable,
- .cgroup_init = maximal_cgroup_init,
- .cgroup_exit = maximal_cgroup_exit,
- .cgroup_prep_move = maximal_cgroup_prep_move,
- .cgroup_move = maximal_cgroup_move,
- .cgroup_cancel_move = maximal_cgroup_cancel_move,
- .cgroup_set_weight = maximal_cgroup_set_weight,
- .init = maximal_init,
- .exit = maximal_exit,
+ .select_cpu = (void *) maximal_select_cpu,
+ .enqueue = (void *) maximal_enqueue,
+ .dequeue = (void *) maximal_dequeue,
+ .dispatch = (void *) maximal_dispatch,
+ .runnable = (void *) maximal_runnable,
+ .running = (void *) maximal_running,
+ .stopping = (void *) maximal_stopping,
+ .quiescent = (void *) maximal_quiescent,
+ .yield = (void *) maximal_yield,
+ .core_sched_before = (void *) maximal_core_sched_before,
+ .set_weight = (void *) maximal_set_weight,
+ .set_cpumask = (void *) maximal_set_cpumask,
+ .update_idle = (void *) maximal_update_idle,
+ .cpu_acquire = (void *) maximal_cpu_acquire,
+ .cpu_release = (void *) maximal_cpu_release,
+ .cpu_online = (void *) maximal_cpu_online,
+ .cpu_offline = (void *) maximal_cpu_offline,
+ .init_task = (void *) maximal_init_task,
+ .enable = (void *) maximal_enable,
+ .exit_task = (void *) maximal_exit_task,
+ .disable = (void *) maximal_disable,
+ .cgroup_init = (void *) maximal_cgroup_init,
+ .cgroup_exit = (void *) maximal_cgroup_exit,
+ .cgroup_prep_move = (void *) maximal_cgroup_prep_move,
+ .cgroup_move = (void *) maximal_cgroup_move,
+ .cgroup_cancel_move = (void *) maximal_cgroup_cancel_move,
+ .cgroup_set_weight = (void *) maximal_cgroup_set_weight,
+ .init = (void *) maximal_init,
+ .exit = (void *) maximal_exit,
.name = "maximal",
};
diff --git a/tools/testing/selftests/sched_ext/maybe_null.bpf.c b/tools/testing/selftests/sched_ext/maybe_null.bpf.c
index 27d0f386acfb..cf4ae870cd4e 100644
--- a/tools/testing/selftests/sched_ext/maybe_null.bpf.c
+++ b/tools/testing/selftests/sched_ext/maybe_null.bpf.c
@@ -29,8 +29,8 @@ bool BPF_STRUCT_OPS(maybe_null_success_yield, struct task_struct *from,
SEC(".struct_ops.link")
struct sched_ext_ops maybe_null_success = {
- .dispatch = maybe_null_success_dispatch,
- .yield = maybe_null_success_yield,
- .enable = maybe_null_running,
+ .dispatch = (void *) maybe_null_success_dispatch,
+ .yield = (void *) maybe_null_success_yield,
+ .enable = (void *) maybe_null_running,
.name = "minimal",
};
diff --git a/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c b/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
index c0641050271d..ec724d7b33d1 100644
--- a/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
+++ b/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
@@ -19,7 +19,7 @@ void BPF_STRUCT_OPS(maybe_null_fail_dispatch, s32 cpu, struct task_struct *p)
SEC(".struct_ops.link")
struct sched_ext_ops maybe_null_fail = {
- .dispatch = maybe_null_fail_dispatch,
- .enable = maybe_null_running,
+ .dispatch = (void *) maybe_null_fail_dispatch,
+ .enable = (void *) maybe_null_running,
.name = "maybe_null_fail_dispatch",
};
diff --git a/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c b/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
index 3c1740028e3b..e6552cace020 100644
--- a/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
+++ b/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
@@ -22,7 +22,7 @@ bool BPF_STRUCT_OPS(maybe_null_fail_yield, struct task_struct *from,
SEC(".struct_ops.link")
struct sched_ext_ops maybe_null_fail = {
- .yield = maybe_null_fail_yield,
- .enable = maybe_null_running,
+ .yield = (void *) maybe_null_fail_yield,
+ .enable = (void *) maybe_null_running,
.name = "maybe_null_fail_yield",
};
diff --git a/tools/testing/selftests/sched_ext/prog_run.bpf.c b/tools/testing/selftests/sched_ext/prog_run.bpf.c
index 6a4d7c48e3f2..00c267626a68 100644
--- a/tools/testing/selftests/sched_ext/prog_run.bpf.c
+++ b/tools/testing/selftests/sched_ext/prog_run.bpf.c
@@ -28,6 +28,6 @@ void BPF_STRUCT_OPS(prog_run_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops prog_run_ops = {
- .exit = prog_run_exit,
+ .exit = (void *) prog_run_exit,
.name = "prog_run",
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
index 2ed2991afafe..f171ac470970 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
@@ -35,6 +35,6 @@ void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dfl_ops = {
- .enqueue = select_cpu_dfl_enqueue,
+ .enqueue = (void *) select_cpu_dfl_enqueue,
.name = "select_cpu_dfl",
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
index 4bb5abb2d369..9efdbb7da928 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
@@ -82,8 +82,8 @@ s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dfl_nodispatch_ops = {
- .select_cpu = select_cpu_dfl_nodispatch_select_cpu,
- .enqueue = select_cpu_dfl_nodispatch_enqueue,
- .init_task = select_cpu_dfl_nodispatch_init_task,
+ .select_cpu = (void *) select_cpu_dfl_nodispatch_select_cpu,
+ .enqueue = (void *) select_cpu_dfl_nodispatch_enqueue,
+ .init_task = (void *) select_cpu_dfl_nodispatch_init_task,
.name = "select_cpu_dfl_nodispatch",
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
index f0b96a4a04b2..59bfc4f36167 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
@@ -35,7 +35,7 @@ dispatch:
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dispatch_ops = {
- .select_cpu = select_cpu_dispatch_select_cpu,
+ .select_cpu = (void *) select_cpu_dispatch_select_cpu,
.name = "select_cpu_dispatch",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
index 7b42ddce0f56..3bbd5fcdfb18 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
@@ -30,8 +30,8 @@ void BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dispatch_bad_dsq_ops = {
- .select_cpu = select_cpu_dispatch_bad_dsq_select_cpu,
- .exit = select_cpu_dispatch_bad_dsq_exit,
+ .select_cpu = (void *) select_cpu_dispatch_bad_dsq_select_cpu,
+ .exit = (void *) select_cpu_dispatch_bad_dsq_exit,
.name = "select_cpu_dispatch_bad_dsq",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
index 653e3dc0b4dc..0fda57fe0ecf 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
@@ -31,8 +31,8 @@ void BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dispatch_dbl_dsp_ops = {
- .select_cpu = select_cpu_dispatch_dbl_dsp_select_cpu,
- .exit = select_cpu_dispatch_dbl_dsp_exit,
+ .select_cpu = (void *) select_cpu_dispatch_dbl_dsp_select_cpu,
+ .exit = (void *) select_cpu_dispatch_dbl_dsp_exit,
.name = "select_cpu_dispatch_dbl_dsp",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
index 7f3ebf4fc2ea..e6c67bcf5e6e 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
@@ -81,12 +81,12 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(select_cpu_vtime_init)
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_vtime_ops = {
- .select_cpu = select_cpu_vtime_select_cpu,
- .dispatch = select_cpu_vtime_dispatch,
- .running = select_cpu_vtime_running,
- .stopping = select_cpu_vtime_stopping,
- .enable = select_cpu_vtime_enable,
- .init = select_cpu_vtime_init,
+ .select_cpu = (void *) select_cpu_vtime_select_cpu,
+ .dispatch = (void *) select_cpu_vtime_dispatch,
+ .running = (void *) select_cpu_vtime_running,
+ .stopping = (void *) select_cpu_vtime_stopping,
+ .enable = (void *) select_cpu_vtime_enable,
+ .init = (void *) select_cpu_vtime_init,
.name = "select_cpu_vtime",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/watchdog/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c
index bc71cbca0dde..a1f506ba5578 100644
--- a/tools/testing/selftests/watchdog/watchdog-test.c
+++ b/tools/testing/selftests/watchdog/watchdog-test.c
@@ -334,7 +334,13 @@ int main(int argc, char *argv[])
printf("Watchdog Ticking Away!\n");
+ /*
+ * Register the signals
+ */
signal(SIGINT, term);
+ signal(SIGTERM, term);
+ signal(SIGKILL, term);
+ signal(SIGQUIT, term);
while (1) {
keep_alive();
diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c
index c53f220eb6cc..b33b47342d41 100644
--- a/tools/testing/vma/vma.c
+++ b/tools/testing/vma/vma.c
@@ -1522,6 +1522,45 @@ static bool test_copy_vma(void)
return true;
}
+static bool test_expand_only_mode(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vm_area_struct *vma_prev, *vma;
+ VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);
+
+ /*
+ * Place a VMA prior to the one we're expanding so we assert that we do
+ * not erroneously try to traverse to the previous VMA even though we
+ * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
+ * need to do so.
+ */
+ alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
+
+ /*
+ * We will be positioned at the prev VMA, but looking to expand to
+ * 0x9000.
+ */
+ vma_iter_set(&vmi, 0x3000);
+ vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
+
+ vma = vma_merge_new_range(&vmg);
+ ASSERT_NE(vma, NULL);
+ ASSERT_EQ(vma, vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma->vm_start, 0x3000);
+ ASSERT_EQ(vma->vm_end, 0x9000);
+ ASSERT_EQ(vma->vm_pgoff, 3);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
+
+ cleanup_mm(&mm, &vmi);
+ return true;
+}
+
int main(void)
{
int num_tests = 0, num_fail = 0;
@@ -1553,6 +1592,7 @@ int main(void)
TEST(vmi_prealloc_fail);
TEST(merge_extend);
TEST(copy_vma);
+ TEST(expand_only_mode);
#undef TEST
diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c
index b29101986b5a..6b78d4a81e95 100644
--- a/tools/usb/usbip/src/usbip_detach.c
+++ b/tools/usb/usbip/src/usbip_detach.c
@@ -68,6 +68,7 @@ static int detach_port(char *port)
}
if (!found) {
+ ret = -1;
err("Invalid port %s > maxports %d",
port, vhci_driver->nports);
goto call_driver_close;