author    Alexei Starovoitov <ast@kernel.org>  2024-10-15 11:16:25 -0700
committer Alexei Starovoitov <ast@kernel.org>  2024-10-15 11:16:25 -0700
commit    ee230090f62fbb1c63c7f305d57289ab753221ef (patch)
tree      e182ec5e34751574f8d6d7a69ddb8f77a40bc0c4 /tools
parent    b836cbdf3b81a4a22b3452186efa2e5105a77e10 (diff)
parent    35ccd576a23ce495b4064f4a3445626de790cd23 (diff)
Merge branch 'fix-truncation-bug-in-coerce_reg_to_size_sx-and-extend-selftests'
Dimitar Kanaliev says:

====================
Fix truncation bug in coerce_reg_to_size_sx and extend selftests.

This patch series addresses a truncation bug in the eBPF verifier function
coerce_reg_to_size_sx(). The issue was caused by the incorrect ordering of
assignments between the 32-bit and 64-bit min/max values, leading to improper
truncation when updating the register state. This issue has been reported
previously by Zac Ecob [1], but was not followed up on.

The first patch fixes the assignment order in coerce_reg_to_size_sx() to
ensure correct truncation. The subsequent patches add selftests for
coerce_{reg,subreg}_to_size_sx.

Changelog:
v1 -> v2:
 - Moved selftests inside the conditional check for cpuv4

[1] (https://lore.kernel.org/bpf/h3qKLDEO6m9nhif0eAQX4fVrqdO0D_OPb0y5HfMK9jBePEKK33wQ3K-bqSVnr0hiZdFZtSJOsbNkcEQGpv_yJk61PAAiO8fUkgMRSO-lB50=@protonmail.com/)
====================

Link: https://lore.kernel.org/r/20241014121155.92887-1-dimitar.kanaliev@siteground.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
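The fix to coerce_reg_to_size_sx() itself lives in kernel/bpf/verifier.c and is therefore not part of the 'tools' diff shown here. As a reader aid, here is a minimal user-space sketch of the ordering problem the cover letter describes: a chained assignment that writes the 32-bit bound first truncates the value before it ever reaches the 64-bit bound. The struct below is only modeled on the relevant bpf_reg_state field names; it is an illustration of the described issue, not the verbatim kernel change.

/* Minimal sketch, assuming the assignment-ordering issue described above.
 * Field names mirror struct bpf_reg_state; the value matches what the new
 * tests produce (a register holding -2 after an 8-bit sign extension).
 */
#include <stdio.h>
#include <stdint.h>

struct reg_bounds_sketch {
	uint64_t umin_value;    /* 64-bit unsigned minimum */
	uint32_t u32_min_value; /* 32-bit unsigned minimum */
};

int main(void)
{
	int64_t s64_min = (int8_t)0xfe; /* -2, i.e. 0xfffffffffffffffe */
	struct reg_bounds_sketch buggy = {0}, fixed = {0};

	/* Truncating order: the inner assignment stores into the 32-bit
	 * field, so the 64-bit field receives a value that has already
	 * been cut down to 32 bits (0xfffffffe).
	 */
	buggy.umin_value = buggy.u32_min_value = s64_min;

	/* Corrected order: the 64-bit field is written with the full
	 * value first; only the 32-bit field is truncated.
	 */
	fixed.u32_min_value = fixed.umin_value = s64_min;

	printf("truncating order: umin_value = %#llx\n",
	       (unsigned long long)buggy.umin_value);
	printf("corrected order:  umin_value = %#llx\n",
	       (unsigned long long)fixed.umin_value);
	return 0;
}

With the truncating order, a register whose real unsigned range after sign extension is [0xfffffffffffffffe, 0xffffffffffffffff] would be tracked as [0xfffffffe, 0xffffffff], which is the kind of bogus bound the new "unsigned range_check" tests below are meant to expose.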
Diffstat (limited to 'tools')
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_movsx.c | 40
1 file changed, 40 insertions(+), 0 deletions(-)
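For reference when reading the diff: both new programs mask a random value down to {0, 1}, add 0xfe, and then sign-extend the low byte, so at run time the register can only hold -2 or -1. A small stand-alone C check of that arithmetic (plain user-space code, not part of the patch) shows why the guarded branches can never be taken:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	for (uint64_t r0 = 0xfe; r0 <= 0xff; r0++) {
		uint64_t sext64 = (uint64_t)(int8_t)r0; /* r0 = (s8)r0 */
		uint32_t sext32 = (uint32_t)(int8_t)r0; /* w0 = (s8)w0 */

		/* 0xfe -> 0xfffffffffffffffe / 0xfffffffe
		 * 0xff -> 0xffffffffffffffff / 0xffffffff
		 * Neither value is below the 0xfffffffffffffffe (64-bit)
		 * or 0xfffffffe (32-bit) thresholds used in the tests,
		 * so both programs fall through to "r0 = 0; exit;".
		 */
		printf("%#llx -> %#llx / %#x\n",
		       (unsigned long long)r0,
		       (unsigned long long)sext64, sext32);
	}
	return 0;
}

A verifier carrying the truncated 64-bit bounds could instead conclude that the guarded branch is always taken, which is what the __success and __retval(0) annotations are there to catch.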
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index 028ec855587b..994bbc346d25 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -287,6 +287,46 @@ l0_%=: \
: __clobber_all);
}
+SEC("socket")
+__description("MOV64SX, S8, unsigned range_check")
+__success __retval(0)
+__naked void mov64sx_s8_range_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x1; \
+ r0 += 0xfe; \
+ r0 = (s8)r0; \
+ if r0 < 0xfffffffffffffffe goto label_%=; \
+ r0 = 0; \
+ exit; \
+label_%=: \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, unsigned range_check")
+__success __retval(0)
+__naked void mov32sx_s8_range_check(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ w0 &= 0x1; \
+ w0 += 0xfe; \
+ w0 = (s8)w0; \
+ if w0 < 0xfffffffe goto label_%=; \
+ r0 = 0; \
+ exit; \
+label_%=: \
+ exit; \
+ " :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
#else
SEC("socket")