Diffstat (limited to 'tools/testing/selftests')
74 files changed, 4745 insertions, 483 deletions
diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile index 1e8d9a8f59df..9460cbe81bcc 100644 --- a/tools/testing/selftests/arm64/Makefile +++ b/tools/testing/selftests/arm64/Makefile @@ -17,16 +17,7 @@ top_srcdir = $(realpath ../../../../) # Additional include paths needed by kselftest.h and local headers CFLAGS += -I$(top_srcdir)/tools/testing/selftests/ -# Guessing where the Kernel headers could have been installed -# depending on ENV config -ifeq ($(KBUILD_OUTPUT),) -khdr_dir = $(top_srcdir)/usr/include -else -# the KSFT preferred location when KBUILD_OUTPUT is set -khdr_dir = $(KBUILD_OUTPUT)/kselftest/usr/include -endif - -CFLAGS += -I$(khdr_dir) +CFLAGS += $(KHDR_INCLUDES) export CFLAGS export top_srcdir diff --git a/tools/testing/selftests/arm64/abi/.gitignore b/tools/testing/selftests/arm64/abi/.gitignore index b79cf5814c23..b9e54417250d 100644 --- a/tools/testing/selftests/arm64/abi/.gitignore +++ b/tools/testing/selftests/arm64/abi/.gitignore @@ -1 +1,2 @@ syscall-abi +tpidr2 diff --git a/tools/testing/selftests/arm64/abi/Makefile b/tools/testing/selftests/arm64/abi/Makefile index 96eba974ac8d..c8d7f2495eb2 100644 --- a/tools/testing/selftests/arm64/abi/Makefile +++ b/tools/testing/selftests/arm64/abi/Makefile @@ -1,8 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (C) 2021 ARM Limited -TEST_GEN_PROGS := syscall-abi +TEST_GEN_PROGS := syscall-abi tpidr2 include ../../lib.mk $(OUTPUT)/syscall-abi: syscall-abi.c syscall-abi-asm.S + +# Build with nolibc since TPIDR2 is intended to be actively managed by +# libc and we're trying to test the functionality that it depends on here. +$(OUTPUT)/tpidr2: tpidr2.c + $(CC) -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \ + -static -include ../../../../include/nolibc/nolibc.h \ + -ffreestanding -Wall $^ -o $@ -lgcc diff --git a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S index 983467cfcee0..b523c21c2278 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S +++ b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S @@ -9,15 +9,42 @@ // invoked is configured in x8 of the input GPR data. 
// // x0: SVE VL, 0 for FP only +// x1: SME VL // // GPRs: gpr_in, gpr_out // FPRs: fpr_in, fpr_out // Zn: z_in, z_out // Pn: p_in, p_out // FFR: ffr_in, ffr_out +// ZA: za_in, za_out +// SVCR: svcr_in, svcr_out + +#include "syscall-abi.h" .arch_extension sve +/* + * LDR (vector to ZA array): + * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] + */ +.macro _ldr_za nw, nxbase, offset=0 + .inst 0xe1000000 \ + | (((\nw) & 3) << 13) \ + | ((\nxbase) << 5) \ + | ((\offset) & 7) +.endm + +/* + * STR (vector from ZA array): + * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] + */ +.macro _str_za nw, nxbase, offset=0 + .inst 0xe1200000 \ + | (((\nw) & 3) << 13) \ + | ((\nxbase) << 5) \ + | ((\offset) & 7) +.endm + .globl do_syscall do_syscall: // Store callee saved registers x19-x29 (80 bytes) plus x0 and x1 @@ -30,6 +57,24 @@ do_syscall: stp x25, x26, [sp, #80] stp x27, x28, [sp, #96] + // Set SVCR if we're doing SME + cbz x1, 1f + adrp x2, svcr_in + ldr x2, [x2, :lo12:svcr_in] + msr S3_3_C4_C2_2, x2 +1: + + // Load ZA if it's enabled - uses x12 as scratch due to SME LDR + tbz x2, #SVCR_ZA_SHIFT, 1f + mov w12, #0 + ldr x2, =za_in +2: _ldr_za 12, 2 + add x2, x2, x1 + add x12, x12, #1 + cmp x1, x12 + bne 2b +1: + // Load GPRs x8-x28, and save our SP/FP for later comparison ldr x2, =gpr_in add x2, x2, #64 @@ -68,7 +113,7 @@ do_syscall: ldp q30, q31, [x2, #16 * 30] 1: - // Load the SVE registers if we're doing SVE + // Load the SVE registers if we're doing SVE/SME cbz x0, 1f ldr x2, =z_in @@ -105,9 +150,14 @@ do_syscall: ldr z30, [x2, #30, MUL VL] ldr z31, [x2, #31, MUL VL] + // Only set a non-zero FFR, test patterns must be zero since the + // syscall should clear it - this lets us handle FA64. ldr x2, =ffr_in ldr p0, [x2, #0] + ldr x2, [x2, #0] + cbz x2, 2f wrffr p0.b +2: ldr x2, =p_in ldr p0, [x2, #0, MUL VL] @@ -169,6 +219,24 @@ do_syscall: stp q28, q29, [x2, #16 * 28] stp q30, q31, [x2, #16 * 30] + // Save SVCR if we're doing SME + cbz x1, 1f + mrs x2, S3_3_C4_C2_2 + adrp x3, svcr_out + str x2, [x3, :lo12:svcr_out] +1: + + // Save ZA if it's enabled - uses x12 as scratch due to SME STR + tbz x2, #SVCR_ZA_SHIFT, 1f + mov w12, #0 + ldr x2, =za_out +2: _str_za 12, 2 + add x2, x2, x1 + add x12, x12, #1 + cmp x1, x12 + bne 2b +1: + // Save the SVE state if we have some cbz x0, 1f @@ -224,6 +292,10 @@ do_syscall: str p14, [x2, #14, MUL VL] str p15, [x2, #15, MUL VL] + // Only save FFR if we wrote a value for SME + ldr x2, =ffr_in + ldr x2, [x2, #0] + cbz x2, 1f ldr x2, =ffr_out rdffr p0.b str p0, [x2, #0] @@ -237,4 +309,9 @@ do_syscall: ldp x27, x28, [sp, #96] ldp x29, x30, [sp], #112 + // Clear SVCR if we were doing SME so future tests don't have ZA + cbz x1, 1f + msr S3_3_C4_C2_2, xzr +1: + ret diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c index 1e13b7523918..b632bfe9e022 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi.c +++ b/tools/testing/selftests/arm64/abi/syscall-abi.c @@ -18,9 +18,13 @@ #include "../../kselftest.h" +#include "syscall-abi.h" + #define NUM_VL ((SVE_VQ_MAX - SVE_VQ_MIN) + 1) -extern void do_syscall(int sve_vl); +static int default_sme_vl; + +extern void do_syscall(int sve_vl, int sme_vl); static void fill_random(void *buf, size_t size) { @@ -48,14 +52,15 @@ static struct syscall_cfg { uint64_t gpr_in[NUM_GPR]; uint64_t gpr_out[NUM_GPR]; -static void setup_gpr(struct syscall_cfg *cfg, int sve_vl) +static void setup_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { 
fill_random(gpr_in, sizeof(gpr_in)); gpr_in[8] = cfg->syscall_nr; memset(gpr_out, 0, sizeof(gpr_out)); } -static int check_gpr(struct syscall_cfg *cfg, int sve_vl) +static int check_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, uint64_t svcr) { int errors = 0; int i; @@ -79,13 +84,15 @@ static int check_gpr(struct syscall_cfg *cfg, int sve_vl) uint64_t fpr_in[NUM_FPR * 2]; uint64_t fpr_out[NUM_FPR * 2]; -static void setup_fpr(struct syscall_cfg *cfg, int sve_vl) +static void setup_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { fill_random(fpr_in, sizeof(fpr_in)); memset(fpr_out, 0, sizeof(fpr_out)); } -static int check_fpr(struct syscall_cfg *cfg, int sve_vl) +static int check_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { int errors = 0; int i; @@ -109,13 +116,15 @@ static uint8_t z_zero[__SVE_ZREG_SIZE(SVE_VQ_MAX)]; uint8_t z_in[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; uint8_t z_out[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; -static void setup_z(struct syscall_cfg *cfg, int sve_vl) +static void setup_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { fill_random(z_in, sizeof(z_in)); fill_random(z_out, sizeof(z_out)); } -static int check_z(struct syscall_cfg *cfg, int sve_vl) +static int check_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { size_t reg_size = sve_vl; int errors = 0; @@ -126,13 +135,17 @@ static int check_z(struct syscall_cfg *cfg, int sve_vl) /* * After a syscall the low 128 bits of the Z registers should - * be preserved and the rest be zeroed or preserved. + * be preserved and the rest be zeroed or preserved, except if + * we were in streaming mode in which case the low 128 bits may + * also be cleared by the transition out of streaming mode. */ for (i = 0; i < SVE_NUM_ZREGS; i++) { void *in = &z_in[reg_size * i]; void *out = &z_out[reg_size * i]; - if (memcmp(in, out, SVE_VQ_BYTES) != 0) { + if ((memcmp(in, out, SVE_VQ_BYTES) != 0) && + !((svcr & SVCR_SM_MASK) && + memcmp(z_zero, out, SVE_VQ_BYTES) == 0)) { ksft_print_msg("%s SVE VL %d Z%d low 128 bits changed\n", cfg->name, sve_vl, i); errors++; @@ -145,13 +158,15 @@ static int check_z(struct syscall_cfg *cfg, int sve_vl) uint8_t p_in[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)]; uint8_t p_out[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)]; -static void setup_p(struct syscall_cfg *cfg, int sve_vl) +static void setup_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { fill_random(p_in, sizeof(p_in)); fill_random(p_out, sizeof(p_out)); } -static int check_p(struct syscall_cfg *cfg, int sve_vl) +static int check_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */ @@ -175,9 +190,20 @@ static int check_p(struct syscall_cfg *cfg, int sve_vl) uint8_t ffr_in[__SVE_PREG_SIZE(SVE_VQ_MAX)]; uint8_t ffr_out[__SVE_PREG_SIZE(SVE_VQ_MAX)]; -static void setup_ffr(struct syscall_cfg *cfg, int sve_vl) +static void setup_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { /* + * If we are in streaming mode and do not have FA64 then FFR + * is unavailable. + */ + if ((svcr & SVCR_SM_MASK) && + !(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)) { + memset(&ffr_in, 0, sizeof(ffr_in)); + return; + } + + /* * It is only valid to set a contiguous set of bits starting * at 0. For now since we're expecting this to be cleared by * a syscall just set all bits. 
@@ -186,7 +212,8 @@ static void setup_ffr(struct syscall_cfg *cfg, int sve_vl) fill_random(ffr_out, sizeof(ffr_out)); } -static int check_ffr(struct syscall_cfg *cfg, int sve_vl) +static int check_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */ int errors = 0; @@ -195,6 +222,10 @@ static int check_ffr(struct syscall_cfg *cfg, int sve_vl) if (!sve_vl) return 0; + if ((svcr & SVCR_SM_MASK) && + !(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)) + return 0; + /* After a syscall the P registers should be preserved or zeroed */ for (i = 0; i < reg_size; i++) if (ffr_out[i] && (ffr_in[i] != ffr_out[i])) @@ -206,8 +237,65 @@ static int check_ffr(struct syscall_cfg *cfg, int sve_vl) return errors; } -typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl); -typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl); +uint64_t svcr_in, svcr_out; + +static void setup_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) +{ + svcr_in = svcr; +} + +static int check_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) +{ + int errors = 0; + + if (svcr_out & SVCR_SM_MASK) { + ksft_print_msg("%s Still in SM, SVCR %llx\n", + cfg->name, svcr_out); + errors++; + } + + if ((svcr_in & SVCR_ZA_MASK) != (svcr_out & SVCR_ZA_MASK)) { + ksft_print_msg("%s PSTATE.ZA changed, SVCR %llx != %llx\n", + cfg->name, svcr_in, svcr_out); + errors++; + } + + return errors; +} + +uint8_t za_in[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; +uint8_t za_out[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; + +static void setup_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) +{ + fill_random(za_in, sizeof(za_in)); + memset(za_out, 0, sizeof(za_out)); +} + +static int check_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) +{ + size_t reg_size = sme_vl * sme_vl; + int errors = 0; + + if (!(svcr & SVCR_ZA_MASK)) + return 0; + + if (memcmp(za_in, za_out, reg_size) != 0) { + ksft_print_msg("SME VL %d ZA does not match\n", sme_vl); + errors++; + } + + return errors; +} + +typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr); +typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr); /* * Each set of registers has a setup function which is called before @@ -225,20 +313,23 @@ static struct { { setup_z, check_z }, { setup_p, check_p }, { setup_ffr, check_ffr }, + { setup_svcr, check_svcr }, + { setup_za, check_za }, }; -static bool do_test(struct syscall_cfg *cfg, int sve_vl) +static bool do_test(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { int errors = 0; int i; for (i = 0; i < ARRAY_SIZE(regset); i++) - regset[i].setup(cfg, sve_vl); + regset[i].setup(cfg, sve_vl, sme_vl, svcr); - do_syscall(sve_vl); + do_syscall(sve_vl, sme_vl); for (i = 0; i < ARRAY_SIZE(regset); i++) - errors += regset[i].check(cfg, sve_vl); + errors += regset[i].check(cfg, sve_vl, sme_vl, svcr); return errors == 0; } @@ -246,9 +337,10 @@ static bool do_test(struct syscall_cfg *cfg, int sve_vl) static void test_one_syscall(struct syscall_cfg *cfg) { int sve_vq, sve_vl; + int sme_vq, sme_vl; /* FPSIMD only case */ - ksft_test_result(do_test(cfg, 0), + ksft_test_result(do_test(cfg, 0, default_sme_vl, 0), "%s FPSIMD\n", cfg->name); if (!(getauxval(AT_HWCAP) & HWCAP_SVE)) @@ -265,8 +357,36 @@ static void test_one_syscall(struct syscall_cfg *cfg) if (sve_vq != sve_vq_from_vl(sve_vl)) sve_vq = sve_vq_from_vl(sve_vl); - 
ksft_test_result(do_test(cfg, sve_vl), + ksft_test_result(do_test(cfg, sve_vl, default_sme_vl, 0), "%s SVE VL %d\n", cfg->name, sve_vl); + + if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) + continue; + + for (sme_vq = SVE_VQ_MAX; sme_vq > 0; --sme_vq) { + sme_vl = prctl(PR_SME_SET_VL, sme_vq * 16); + if (sme_vl == -1) + ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n", + strerror(errno), errno); + + sme_vl &= PR_SME_VL_LEN_MASK; + + if (sme_vq != sve_vq_from_vl(sme_vl)) + sme_vq = sve_vq_from_vl(sme_vl); + + ksft_test_result(do_test(cfg, sve_vl, sme_vl, + SVCR_ZA_MASK | SVCR_SM_MASK), + "%s SVE VL %d/SME VL %d SM+ZA\n", + cfg->name, sve_vl, sme_vl); + ksft_test_result(do_test(cfg, sve_vl, sme_vl, + SVCR_SM_MASK), + "%s SVE VL %d/SME VL %d SM\n", + cfg->name, sve_vl, sme_vl); + ksft_test_result(do_test(cfg, sve_vl, sme_vl, + SVCR_ZA_MASK), + "%s SVE VL %d/SME VL %d ZA\n", + cfg->name, sve_vl, sme_vl); + } } } @@ -299,14 +419,54 @@ int sve_count_vls(void) return vl_count; } +int sme_count_vls(void) +{ + unsigned int vq; + int vl_count = 0; + int vl; + + if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) + return 0; + + /* Ensure we configure a SME VL, used to flag if SVCR is set */ + default_sme_vl = 16; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(PR_SME_SET_VL, vq * 16); + if (vl == -1) + ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n", + strerror(errno), errno); + + vl &= PR_SME_VL_LEN_MASK; + + if (vq != sve_vq_from_vl(vl)) + vq = sve_vq_from_vl(vl); + + vl_count++; + } + + return vl_count; +} + int main(void) { int i; + int tests = 1; /* FPSIMD */ srandom(getpid()); ksft_print_header(); - ksft_set_plan(ARRAY_SIZE(syscalls) * (sve_count_vls() + 1)); + tests += sve_count_vls(); + tests += (sve_count_vls() * sme_count_vls()) * 3; + ksft_set_plan(ARRAY_SIZE(syscalls) * tests); + + if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64) + ksft_print_msg("SME with FA64\n"); + else if (getauxval(AT_HWCAP2) & HWCAP2_SME) + ksft_print_msg("SME without FA64\n"); for (i = 0; i < ARRAY_SIZE(syscalls); i++) test_one_syscall(&syscalls[i]); diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.h b/tools/testing/selftests/arm64/abi/syscall-abi.h new file mode 100644 index 000000000000..bda5a87ad381 --- /dev/null +++ b/tools/testing/selftests/arm64/abi/syscall-abi.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 ARM Limited. 
+ */ + +#ifndef SYSCALL_ABI_H +#define SYSCALL_ABI_H + +#define SVCR_ZA_MASK 2 +#define SVCR_SM_MASK 1 + +#define SVCR_ZA_SHIFT 1 +#define SVCR_SM_SHIFT 0 + +#endif diff --git a/tools/testing/selftests/arm64/abi/tpidr2.c b/tools/testing/selftests/arm64/abi/tpidr2.c new file mode 100644 index 000000000000..351a098b503a --- /dev/null +++ b/tools/testing/selftests/arm64/abi/tpidr2.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/sched.h> +#include <linux/wait.h> + +#define SYS_TPIDR2 "S3_3_C13_C0_5" + +#define EXPECTED_TESTS 5 + +static void putstr(const char *str) +{ + write(1, str, strlen(str)); +} + +static void putnum(unsigned int num) +{ + char c; + + if (num / 10) + putnum(num / 10); + + c = '0' + (num % 10); + write(1, &c, 1); +} + +static int tests_run; +static int tests_passed; +static int tests_failed; +static int tests_skipped; + +static void set_tpidr2(uint64_t val) +{ + asm volatile ( + "msr " SYS_TPIDR2 ", %0\n" + : + : "r"(val) + : "cc"); +} + +static uint64_t get_tpidr2(void) +{ + uint64_t val; + + asm volatile ( + "mrs %0, " SYS_TPIDR2 "\n" + : "=r"(val) + : + : "cc"); + + return val; +} + +static void print_summary(void) +{ + if (tests_passed + tests_failed + tests_skipped != EXPECTED_TESTS) + putstr("# UNEXPECTED TEST COUNT: "); + + putstr("# Totals: pass:"); + putnum(tests_passed); + putstr(" fail:"); + putnum(tests_failed); + putstr(" xfail:0 xpass:0 skip:"); + putnum(tests_skipped); + putstr(" error:0\n"); +} + +/* Processes should start with TPIDR2 == 0 */ +static int default_value(void) +{ + return get_tpidr2() == 0; +} + +/* If we set TPIDR2 we should read that value */ +static int write_read(void) +{ + set_tpidr2(getpid()); + + return getpid() == get_tpidr2(); +} + +/* If we set a value we should read the same value after scheduling out */ +static int write_sleep_read(void) +{ + set_tpidr2(getpid()); + + msleep(100); + + return getpid() == get_tpidr2(); +} + +/* + * If we fork the value in the parent should be unchanged and the + * child should start with the same value and be able to set its own + * value. + */ +static int write_fork_read(void) +{ + pid_t newpid, waiting, oldpid; + int status; + + set_tpidr2(getpid()); + + oldpid = getpid(); + newpid = fork(); + if (newpid == 0) { + /* In child */ + if (get_tpidr2() != oldpid) { + putstr("# TPIDR2 changed in child: "); + putnum(get_tpidr2()); + putstr("\n"); + exit(0); + } + + set_tpidr2(getpid()); + if (get_tpidr2() == getpid()) { + exit(1); + } else { + putstr("# Failed to set TPIDR2 in child\n"); + exit(0); + } + } + if (newpid < 0) { + putstr("# fork() failed: -"); + putnum(-newpid); + putstr("\n"); + return 0; + } + + for (;;) { + waiting = waitpid(newpid, &status, 0); + + if (waiting < 0) { + if (errno == EINTR) + continue; + putstr("# waitpid() failed: "); + putnum(errno); + putstr("\n"); + return 0; + } + if (waiting != newpid) { + putstr("# waitpid() returned wrong PID\n"); + return 0; + } + + if (!WIFEXITED(status)) { + putstr("# child did not exit\n"); + return 0; + } + + if (getpid() != get_tpidr2()) { + putstr("# TPIDR2 corrupted in parent\n"); + return 0; + } + + return WEXITSTATUS(status); + } +} + +/* + * sys_clone() has a lot of per architecture variation so just define + * it here rather than adding it to nolibc, plus the raw API is a + * little more convenient for this test. 
+ */ +static int sys_clone(unsigned long clone_flags, unsigned long newsp, + int *parent_tidptr, unsigned long tls, + int *child_tidptr) +{ + return my_syscall5(__NR_clone, clone_flags, newsp, parent_tidptr, tls, + child_tidptr); +} + +/* + * If we clone with CLONE_SETTLS then the value in the parent should + * be unchanged and the child should start with zero and be able to + * set its own value. + */ +static int write_clone_read(void) +{ + int parent_tid, child_tid; + pid_t parent, waiting; + int ret, status; + + parent = getpid(); + set_tpidr2(parent); + + ret = sys_clone(CLONE_SETTLS, 0, &parent_tid, 0, &child_tid); + if (ret == -1) { + putstr("# clone() failed\n"); + putnum(errno); + putstr("\n"); + return 0; + } + + if (ret == 0) { + /* In child */ + if (get_tpidr2() != 0) { + putstr("# TPIDR2 non-zero in child: "); + putnum(get_tpidr2()); + putstr("\n"); + exit(0); + } + + if (gettid() == 0) + putstr("# Child TID==0\n"); + set_tpidr2(gettid()); + if (get_tpidr2() == gettid()) { + exit(1); + } else { + putstr("# Failed to set TPIDR2 in child\n"); + exit(0); + } + } + + for (;;) { + waiting = wait4(ret, &status, __WCLONE, NULL); + + if (waiting < 0) { + if (errno == EINTR) + continue; + putstr("# wait4() failed: "); + putnum(errno); + putstr("\n"); + return 0; + } + if (waiting != ret) { + putstr("# wait4() returned wrong PID "); + putnum(waiting); + putstr("\n"); + return 0; + } + + if (!WIFEXITED(status)) { + putstr("# child did not exit\n"); + return 0; + } + + if (parent != get_tpidr2()) { + putstr("# TPIDR2 corrupted in parent\n"); + return 0; + } + + return WEXITSTATUS(status); + } +} + +#define run_test(name) \ + if (name()) { \ + tests_passed++; \ + } else { \ + tests_failed++; \ + putstr("not "); \ + } \ + putstr("ok "); \ + putnum(++tests_run); \ + putstr(" " #name "\n"); + +int main(int argc, char **argv) +{ + int ret, i; + + putstr("TAP version 13\n"); + putstr("1.."); + putnum(EXPECTED_TESTS); + putstr("\n"); + + putstr("# PID: "); + putnum(getpid()); + putstr("\n"); + + /* + * This test is run with nolibc which doesn't support hwcap and + * it's probably disproportionate to implement so instead check + * for the default vector length configuration in /proc. + */ + ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0); + if (ret >= 0) { + run_test(default_value); + run_test(write_read); + run_test(write_sleep_read); + run_test(write_fork_read); + run_test(write_clone_read); + + } else { + putstr("# SME support not present\n"); + + for (i = 0; i < EXPECTED_TESTS; i++) { + putstr("ok "); + putnum(i); + putstr(" skipped, TPIDR2 not supported\n"); + } + + tests_skipped += EXPECTED_TESTS; + } + + print_summary(); + + return 0; +} diff --git a/tools/testing/selftests/arm64/bti/Makefile b/tools/testing/selftests/arm64/bti/Makefile index 73e013c082a6..ccdac414ad94 100644 --- a/tools/testing/selftests/arm64/bti/Makefile +++ b/tools/testing/selftests/arm64/bti/Makefile @@ -10,7 +10,7 @@ PROGS := $(patsubst %,gen/%,$(TEST_GEN_PROGS)) # cases for statically linked and dynamically lined binaries are # slightly different. 
-CFLAGS_NOBTI = -DBTI=0 +CFLAGS_NOBTI = -mbranch-protection=none -DBTI=0 CFLAGS_BTI = -mbranch-protection=standard -DBTI=1 CFLAGS_COMMON = -ffreestanding -Wall -Wextra $(CFLAGS) @@ -39,7 +39,7 @@ BTI_OBJS = \ teststubs-bti.o \ trampoline-bti.o gen/btitest: $(BTI_OBJS) - $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -o $@ $^ + $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -static -o $@ $^ NOBTI_OBJS = \ test-nobti.o \ @@ -50,7 +50,7 @@ NOBTI_OBJS = \ teststubs-nobti.o \ trampoline-nobti.o gen/nobtitest: $(NOBTI_OBJS) - $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -o $@ $^ + $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -static -o $@ $^ # Including KSFT lib.mk here will also mangle the TEST_GEN_PROGS list # to account for any OUTPUT target-dirs optionally provided by diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore index c50d86331ed2..ea947af63882 100644 --- a/tools/testing/selftests/arm64/fp/.gitignore +++ b/tools/testing/selftests/arm64/fp/.gitignore @@ -1,8 +1,13 @@ fp-pidbench fpsimd-test +rdvl-sme rdvl-sve sve-probe-vls sve-ptrace sve-test +ssve-test vec-syscfg vlset +za-fork +za-ptrace +za-test diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index 95f0b877a060..a7c2286bf65b 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -1,24 +1,42 @@ # SPDX-License-Identifier: GPL-2.0 -CFLAGS += -I../../../../../usr/include/ -TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg -TEST_PROGS_EXTENDED := fp-pidbench fpsimd-test fpsimd-stress \ - rdvl-sve \ - sve-test sve-stress \ +# A proper top_srcdir is needed by KSFT(lib.mk) +top_srcdir = $(realpath ../../../../../) + +CFLAGS += -I$(top_srcdir)/usr/include/ + +TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg za-fork za-ptrace +TEST_GEN_PROGS_EXTENDED := fp-pidbench fpsimd-test \ + rdvl-sme rdvl-sve \ + sve-test \ + ssve-test \ + za-test \ vlset +TEST_PROGS_EXTENDED := fpsimd-stress sve-stress ssve-stress za-stress -all: $(TEST_GEN_PROGS) $(TEST_PROGS_EXTENDED) +EXTRA_CLEAN += $(OUTPUT)/asm-utils.o $(OUTPUT)/rdvl.o $(OUTPUT)/za-fork-asm.o -fp-pidbench: fp-pidbench.S asm-utils.o +# Build with nolibc to avoid effects due to libc's clone() support +$(OUTPUT)/fp-pidbench: fp-pidbench.S $(OUTPUT)/asm-utils.o + $(CC) -nostdlib $^ -o $@ +$(OUTPUT)/fpsimd-test: fpsimd-test.S $(OUTPUT)/asm-utils.o $(CC) -nostdlib $^ -o $@ -fpsimd-test: fpsimd-test.o asm-utils.o +$(OUTPUT)/rdvl-sve: rdvl-sve.c $(OUTPUT)/rdvl.o +$(OUTPUT)/rdvl-sme: rdvl-sme.c $(OUTPUT)/rdvl.o +$(OUTPUT)/sve-ptrace: sve-ptrace.c +$(OUTPUT)/sve-probe-vls: sve-probe-vls.c $(OUTPUT)/rdvl.o +$(OUTPUT)/sve-test: sve-test.S $(OUTPUT)/asm-utils.o $(CC) -nostdlib $^ -o $@ -rdvl-sve: rdvl-sve.o rdvl.o -sve-ptrace: sve-ptrace.o -sve-probe-vls: sve-probe-vls.o rdvl.o -sve-test: sve-test.o asm-utils.o +$(OUTPUT)/ssve-test: sve-test.S $(OUTPUT)/asm-utils.o + $(CC) -DSSVE -nostdlib $^ -o $@ +$(OUTPUT)/vec-syscfg: vec-syscfg.c $(OUTPUT)/rdvl.o +$(OUTPUT)/vlset: vlset.c +$(OUTPUT)/za-fork: za-fork.c $(OUTPUT)/za-fork-asm.o + $(CC) -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \ + -include ../../../../include/nolibc/nolibc.h \ + -static -ffreestanding -Wall $^ -o $@ +$(OUTPUT)/za-ptrace: za-ptrace.c +$(OUTPUT)/za-test: za-test.S $(OUTPUT)/asm-utils.o $(CC) -nostdlib $^ -o $@ -vec-syscfg: vec-syscfg.o rdvl.o -vlset: vlset.o include ../../lib.mk diff --git a/tools/testing/selftests/arm64/fp/rdvl-sme.c 
b/tools/testing/selftests/arm64/fp/rdvl-sme.c new file mode 100644 index 000000000000..49b0b2e08bac --- /dev/null +++ b/tools/testing/selftests/arm64/fp/rdvl-sme.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <stdio.h> + +#include "rdvl.h" + +int main(void) +{ + int vl = rdvl_sme(); + + printf("%d\n", vl); + + return 0; +} diff --git a/tools/testing/selftests/arm64/fp/rdvl.S b/tools/testing/selftests/arm64/fp/rdvl.S index c916c1c9defd..20dc29996dc6 100644 --- a/tools/testing/selftests/arm64/fp/rdvl.S +++ b/tools/testing/selftests/arm64/fp/rdvl.S @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only // Copyright (C) 2021 ARM Limited. +#include "sme-inst.h" + .arch_extension sve .globl rdvl_sve @@ -8,3 +10,11 @@ rdvl_sve: hint 34 // BTI C rdvl x0, #1 ret + +.globl rdvl_sme +rdvl_sme: + hint 34 // BTI C + + rdsvl 0, 1 + + ret diff --git a/tools/testing/selftests/arm64/fp/rdvl.h b/tools/testing/selftests/arm64/fp/rdvl.h index 7c9d953fc9e7..5d323679fbc9 100644 --- a/tools/testing/selftests/arm64/fp/rdvl.h +++ b/tools/testing/selftests/arm64/fp/rdvl.h @@ -3,6 +3,7 @@ #ifndef RDVL_H #define RDVL_H +int rdvl_sme(void); int rdvl_sve(void); #endif diff --git a/tools/testing/selftests/arm64/fp/sme-inst.h b/tools/testing/selftests/arm64/fp/sme-inst.h new file mode 100644 index 000000000000..7191e53ca1c0 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/sme-inst.h @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2021-2 ARM Limited. +// Original author: Mark Brown <broonie@kernel.org> + +#ifndef SME_INST_H +#define SME_INST_H + +/* + * RDSVL X\nx, #\imm + */ +.macro rdsvl nx, imm + .inst 0x4bf5800 \ + | (\imm << 5) \ + | (\nx) +.endm + +.macro smstop + msr S0_3_C4_C6_3, xzr +.endm + +.macro smstart_za + msr S0_3_C4_C5_3, xzr +.endm + +.macro smstart_sm + msr S0_3_C4_C3_3, xzr +.endm + +/* + * LDR (vector to ZA array): + * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] + */ +.macro _ldr_za nw, nxbase, offset=0 + .inst 0xe1000000 \ + | (((\nw) & 3) << 13) \ + | ((\nxbase) << 5) \ + | ((\offset) & 7) +.endm + +/* + * STR (vector from ZA array): + * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] + */ +.macro _str_za nw, nxbase, offset=0 + .inst 0xe1200000 \ + | (((\nw) & 3) << 13) \ + | ((\nxbase) << 5) \ + | ((\offset) & 7) +.endm + +#endif diff --git a/tools/testing/selftests/arm64/fp/ssve-stress b/tools/testing/selftests/arm64/fp/ssve-stress new file mode 100644 index 000000000000..e2bd2cc184ad --- /dev/null +++ b/tools/testing/selftests/arm64/fp/ssve-stress @@ -0,0 +1,59 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2015-2019 ARM Limited. +# Original author: Dave Martin <Dave.Martin@arm.com> + +set -ue + +NR_CPUS=`nproc` + +pids= +logs= + +cleanup () { + trap - INT TERM CHLD + set +e + + if [ -n "$pids" ]; then + kill $pids + wait $pids + pids= + fi + + if [ -n "$logs" ]; then + cat $logs + rm $logs + logs= + fi +} + +interrupt () { + cleanup + exit 0 +} + +child_died () { + cleanup + exit 1 +} + +trap interrupt INT TERM EXIT + +for x in `seq 0 $((NR_CPUS * 4))`; do + log=`mktemp` + logs=$logs\ $log + ./ssve-test >$log & + pids=$pids\ $! +done + +# Wait for all child processes to be created: +sleep 10 + +while :; do + kill -USR1 $pids +done & +pids=$pids\ $! 
+ +wait + +exit 1 diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c index 4c418b2021e0..8c4847977583 100644 --- a/tools/testing/selftests/arm64/fp/sve-ptrace.c +++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c @@ -26,6 +26,10 @@ #define NT_ARM_SVE 0x405 #endif +#ifndef NT_ARM_SSVE +#define NT_ARM_SSVE 0x40b +#endif + struct vec_type { const char *name; unsigned long hwcap_type; @@ -42,11 +46,18 @@ static const struct vec_type vec_types[] = { .regset = NT_ARM_SVE, .prctl_set = PR_SVE_SET_VL, }, + { + .name = "Streaming SVE", + .hwcap_type = AT_HWCAP2, + .hwcap = HWCAP2_SME, + .regset = NT_ARM_SSVE, + .prctl_set = PR_SME_SET_VL, + }, }; -#define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 3) +#define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 4) #define FLAG_TESTS 2 -#define FPSIMD_TESTS 3 +#define FPSIMD_TESTS 2 #define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types)) @@ -78,6 +89,15 @@ static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd) return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov); } +static int set_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd) +{ + struct iovec iov; + + iov.iov_base = fpsimd; + iov.iov_len = sizeof(*fpsimd); + return ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov); +} + static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type, void **buf, size_t *size) { @@ -240,28 +260,24 @@ static void check_u32(unsigned int vl, const char *reg, /* Access the FPSIMD registers via the SVE regset */ static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type) { - void *svebuf = NULL; - size_t svebufsz = 0; + void *svebuf; struct user_sve_header *sve; struct user_fpsimd_state *fpsimd, new_fpsimd; unsigned int i, j; unsigned char *p; + int ret; - /* New process should start with FPSIMD registers only */ - sve = get_sve(child, type, &svebuf, &svebufsz); - if (!sve) { - ksft_test_result_fail("get_sve(%s): %s\n", - type->name, strerror(errno)); - + svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD)); + if (!svebuf) { + ksft_test_result_fail("Failed to allocate FPSIMD buffer\n"); return; - } else { - ksft_test_result_pass("get_sve(%s FPSIMD)\n", type->name); } - ksft_test_result((sve->flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD, - "Got FPSIMD registers via %s\n", type->name); - if ((sve->flags & SVE_PT_REGS_MASK) != SVE_PT_REGS_FPSIMD) - goto out; + memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD)); + sve = svebuf; + sve->flags = SVE_PT_REGS_FPSIMD; + sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD); + sve->vl = 16; /* We don't care what the VL is */ /* Try to set a known FPSIMD state via PT_REGS_SVE */ fpsimd = (struct user_fpsimd_state *)((char *)sve + @@ -273,12 +289,11 @@ static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type) p[j] = j; } - if (set_sve(child, type, sve)) { - ksft_test_result_fail("set_sve(%s FPSIMD): %s\n", - type->name, strerror(errno)); - + ret = set_sve(child, type, sve); + ksft_test_result(ret == 0, "%s FPSIMD set via SVE: %d\n", + type->name, ret); + if (ret) goto out; - } /* Verify via the FPSIMD regset */ if (get_fpsimd(child, &new_fpsimd)) { @@ -395,7 +410,7 @@ out: free(write_buf); } -/* Validate attempting to set SVE data and read SVE data */ +/* Validate attempting to set SVE data and read it via the FPSIMD regset */ static void ptrace_set_sve_get_fpsimd_data(pid_t child, const struct vec_type *type, unsigned int vl) @@ -478,6 +493,115 @@ out: free(write_buf); } +/* Validate attempting to 
set FPSIMD data and read it via the SVE regset */ +static void ptrace_set_fpsimd_get_sve_data(pid_t child, + const struct vec_type *type, + unsigned int vl) +{ + void *read_buf = NULL; + unsigned char *p; + struct user_sve_header *read_sve; + unsigned int vq = sve_vq_from_vl(vl); + struct user_fpsimd_state write_fpsimd; + int ret, i, j; + size_t read_sve_size = 0; + size_t expected_size; + int errors = 0; + + if (__BYTE_ORDER == __BIG_ENDIAN) { + ksft_test_result_skip("Big endian not supported\n"); + return; + } + + for (i = 0; i < 32; ++i) { + p = (unsigned char *)&write_fpsimd.vregs[i]; + + for (j = 0; j < sizeof(write_fpsimd.vregs[i]); ++j) + p[j] = j; + } + + ret = set_fpsimd(child, &write_fpsimd); + if (ret != 0) { + ksft_test_result_fail("Failed to set FPSIMD state: %d\n)", + ret); + return; + } + + if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) { + ksft_test_result_fail("Failed to read %s VL %u data\n", + type->name, vl); + return; + } + read_sve = read_buf; + + if (read_sve->vl != vl) { + ksft_test_result_fail("Child VL != expected VL %d\n", + read_sve->vl, vl); + goto out; + } + + /* The kernel may return either SVE or FPSIMD format */ + switch (read_sve->flags & SVE_PT_REGS_MASK) { + case SVE_PT_REGS_FPSIMD: + expected_size = SVE_PT_FPSIMD_SIZE(vq, SVE_PT_REGS_FPSIMD); + if (read_sve_size < expected_size) { + ksft_test_result_fail("Read %d bytes, expected %d\n", + read_sve_size, expected_size); + goto out; + } + + ret = memcmp(&write_fpsimd, read_buf + SVE_PT_FPSIMD_OFFSET, + sizeof(write_fpsimd)); + if (ret != 0) { + ksft_print_msg("Read FPSIMD data mismatch\n"); + errors++; + } + break; + + case SVE_PT_REGS_SVE: + expected_size = SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE); + if (read_sve_size < expected_size) { + ksft_test_result_fail("Read %d bytes, expected %d\n", + read_sve_size, expected_size); + goto out; + } + + for (i = 0; i < __SVE_NUM_ZREGS; i++) { + __uint128_t tmp = 0; + + /* + * Z regs are stored endianness invariant, this won't + * work for big endian + */ + memcpy(&tmp, read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i), + sizeof(tmp)); + + if (tmp != write_fpsimd.vregs[i]) { + ksft_print_msg("Mismatch in FPSIMD for %s VL %u Z%d/V%d\n", + type->name, vl, i, i); + errors++; + } + } + + check_u32(vl, "FPSR", &write_fpsimd.fpsr, + read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors); + check_u32(vl, "FPCR", &write_fpsimd.fpcr, + read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors); + break; + default: + ksft_print_msg("Unexpected regs type %d\n", + read_sve->flags & SVE_PT_REGS_MASK); + errors++; + break; + } + + ksft_test_result(errors == 0, "Set FPSIMD, read via SVE for %s VL %u\n", + type->name, vl); + +out: + free(read_buf); +} + static int do_parent(pid_t child) { int ret = EXIT_FAILURE; @@ -548,11 +672,9 @@ static int do_parent(pid_t child) if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) { ptrace_sve_fpsimd(child, &vec_types[i]); } else { - ksft_test_result_skip("%s FPSIMD get via SVE\n", - vec_types[i].name); ksft_test_result_skip("%s FPSIMD set via SVE\n", vec_types[i].name); - ksft_test_result_skip("%s set read via FPSIMD\n", + ksft_test_result_skip("%s FPSIMD read\n", vec_types[i].name); } @@ -585,11 +707,14 @@ static int do_parent(pid_t child) if (vl_supported) { ptrace_set_sve_get_sve_data(child, &vec_types[i], vl); ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl); + ptrace_set_fpsimd_get_sve_data(child, &vec_types[i], vl); } else { ksft_test_result_skip("%s set SVE get SVE for VL %d\n", vec_types[i].name, vl); ksft_test_result_skip("%s set SVE 
get FPSIMD for VL %d\n", vec_types[i].name, vl); + ksft_test_result_skip("%s set FPSIMD get SVE for VL %d\n", + vec_types[i].name, vl); } } } diff --git a/tools/testing/selftests/arm64/fp/sve-test.S b/tools/testing/selftests/arm64/fp/sve-test.S index f5b1b48ffff2..589264231a2d 100644 --- a/tools/testing/selftests/arm64/fp/sve-test.S +++ b/tools/testing/selftests/arm64/fp/sve-test.S @@ -13,6 +13,7 @@ #include <asm/unistd.h> #include "assembler.h" #include "asm-offsets.h" +#include "sme-inst.h" #define NZR 32 #define NPR 16 @@ -156,6 +157,7 @@ endfunction // We fill the upper lanes of FFR with zeros. // Beware: corrupts P0. function setup_ffr +#ifndef SSVE mov x4, x30 and w0, w0, #0x3 @@ -178,6 +180,9 @@ function setup_ffr wrffr p0.b ret x4 +#else + ret +#endif endfunction // Trivial memory compare: compare x2 bytes starting at address x0 with @@ -260,6 +265,7 @@ endfunction // Beware -- corrupts P0. // Clobbers x0-x5. function check_ffr +#ifndef SSVE mov x3, x30 ldr x4, =scratch @@ -280,6 +286,9 @@ function check_ffr mov x2, x5 mov x30, x3 b memcmp +#else + ret +#endif endfunction // Any SVE register modified here can cause corruption in the main @@ -295,10 +304,12 @@ function irritator_handler movi v0.8b, #1 movi v9.16b, #2 movi v31.8b, #3 +#ifndef SSVE // And P0 rdffr p0.b // And FFR wrffr p15.b +#endif ret endfunction @@ -359,6 +370,11 @@ endfunction .globl _start function _start _start: +#ifdef SSVE + puts "Streaming mode " + smstart_sm +#endif + // Sanity-check and report the vector length rdvl x19, #8 @@ -407,6 +423,10 @@ _start: orr w2, w2, #SA_NODEFER bl setsignal +#ifdef SSVE + smstart_sm // syscalls will have exited streaming mode +#endif + mov x22, #0 // generation number, increments per iteration .Ltest_loop: rdvl x0, #8 diff --git a/tools/testing/selftests/arm64/fp/vec-syscfg.c b/tools/testing/selftests/arm64/fp/vec-syscfg.c index c90658811a83..9bcfcdc34ee9 100644 --- a/tools/testing/selftests/arm64/fp/vec-syscfg.c +++ b/tools/testing/selftests/arm64/fp/vec-syscfg.c @@ -51,6 +51,16 @@ static struct vec_data vec_data[] = { .prctl_set = PR_SVE_SET_VL, .default_vl_file = "/proc/sys/abi/sve_default_vector_length", }, + { + .name = "SME", + .hwcap_type = AT_HWCAP2, + .hwcap = HWCAP2_SME, + .rdvl = rdvl_sme, + .rdvl_binary = "./rdvl-sme", + .prctl_get = PR_SME_GET_VL, + .prctl_set = PR_SME_SET_VL, + .default_vl_file = "/proc/sys/abi/sme_default_vector_length", + }, }; static int stdio_read_integer(FILE *f, const char *what, int *val) diff --git a/tools/testing/selftests/arm64/fp/vlset.c b/tools/testing/selftests/arm64/fp/vlset.c index 308d27a68226..76912a581a95 100644 --- a/tools/testing/selftests/arm64/fp/vlset.c +++ b/tools/testing/selftests/arm64/fp/vlset.c @@ -22,12 +22,15 @@ static int inherit = 0; static int no_inherit = 0; static int force = 0; static unsigned long vl; +static int set_ctl = PR_SVE_SET_VL; +static int get_ctl = PR_SVE_GET_VL; static const struct option options[] = { { "force", no_argument, NULL, 'f' }, { "inherit", no_argument, NULL, 'i' }, { "max", no_argument, NULL, 'M' }, { "no-inherit", no_argument, &no_inherit, 1 }, + { "sme", no_argument, NULL, 's' }, { "help", no_argument, NULL, '?' 
}, {} }; @@ -50,6 +53,9 @@ static int parse_options(int argc, char **argv) case 'M': vl = SVE_VL_MAX; break; case 'f': force = 1; break; case 'i': inherit = 1; break; + case 's': set_ctl = PR_SME_SET_VL; + get_ctl = PR_SME_GET_VL; + break; case 0: break; default: goto error; } @@ -125,14 +131,14 @@ int main(int argc, char **argv) if (inherit) flags |= PR_SVE_VL_INHERIT; - t = prctl(PR_SVE_SET_VL, vl | flags); + t = prctl(set_ctl, vl | flags); if (t < 0) { fprintf(stderr, "%s: PR_SVE_SET_VL: %s\n", program_name, strerror(errno)); goto error; } - t = prctl(PR_SVE_GET_VL); + t = prctl(get_ctl); if (t == -1) { fprintf(stderr, "%s: PR_SVE_GET_VL: %s\n", program_name, strerror(errno)); diff --git a/tools/testing/selftests/arm64/fp/za-fork-asm.S b/tools/testing/selftests/arm64/fp/za-fork-asm.S new file mode 100644 index 000000000000..2fafadd491c3 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/za-fork-asm.S @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2021 ARM Limited. + +#include "sme-inst.h" + +.arch_extension sve + +#define MAGIC 42 + +#define MAXVL 2048 +#define MAXVL_B (MAXVL / 8) + +.pushsection .text +.data +.align 4 +scratch: + .space MAXVL_B +.popsection + +.globl fork_test +fork_test: + smstart_za + + // For simplicity just set one word in one vector, other tests + // cover general data corruption issues. + ldr x0, =scratch + mov x1, #MAGIC + str x1, [x0] + mov w12, wzr + _ldr_za 12, 0 // ZA.H[W12] loaded from [X0] + + // Tail call into the C portion that does the fork & verify + b fork_test_c + +.globl verify_fork +verify_fork: + // SVCR should have ZA=1, SM=0 + mrs x0, S3_3_C4_C2_2 + and x1, x0, #3 + cmp x1, #2 + beq 1f + mov x0, xzr + b 100f +1: + + // ZA should still have the value we loaded + ldr x0, =scratch + mov w12, wzr + _str_za 12, 0 // ZA.H[W12] stored to [X0] + ldr x1, [x0] + cmp x1, #MAGIC + beq 2f + mov x0, xzr + b 100f + +2: + // All tests passed + mov x0, #1 +100: + ret + diff --git a/tools/testing/selftests/arm64/fp/za-fork.c b/tools/testing/selftests/arm64/fp/za-fork.c new file mode 100644 index 000000000000..ff475c649e96 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/za-fork.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 ARM Limited. + * Original author: Mark Brown <broonie@kernel.org> + */ + +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/sched.h> +#include <linux/wait.h> + +#define EXPECTED_TESTS 1 + +static void putstr(const char *str) +{ + write(1, str, strlen(str)); +} + +static void putnum(unsigned int num) +{ + char c; + + if (num / 10) + putnum(num / 10); + + c = '0' + (num % 10); + write(1, &c, 1); +} + +static int tests_run; +static int tests_passed; +static int tests_failed; +static int tests_skipped; + +static void print_summary(void) +{ + if (tests_passed + tests_failed + tests_skipped != EXPECTED_TESTS) + putstr("# UNEXPECTED TEST COUNT: "); + + putstr("# Totals: pass:"); + putnum(tests_passed); + putstr(" fail:"); + putnum(tests_failed); + putstr(" xfail:0 xpass:0 skip:"); + putnum(tests_skipped); + putstr(" error:0\n"); +} + +int fork_test(void); +int verify_fork(void); + +/* + * If we fork the value in the parent should be unchanged and the + * child should start with the same value. This is called from the + * fork_test() asm function. 
+ */ +int fork_test_c(void) +{ + pid_t newpid, waiting; + int child_status, parent_result; + + newpid = fork(); + if (newpid == 0) { + /* In child */ + if (!verify_fork()) { + putstr("# ZA state invalid in child\n"); + exit(0); + } else { + exit(1); + } + } + if (newpid < 0) { + putstr("# fork() failed: -"); + putnum(-newpid); + putstr("\n"); + return 0; + } + + parent_result = verify_fork(); + if (!parent_result) + putstr("# ZA state invalid in parent\n"); + + for (;;) { + waiting = waitpid(newpid, &child_status, 0); + + if (waiting < 0) { + if (errno == EINTR) + continue; + putstr("# waitpid() failed: "); + putnum(errno); + putstr("\n"); + return 0; + } + if (waiting != newpid) { + putstr("# waitpid() returned wrong PID\n"); + return 0; + } + + if (!WIFEXITED(child_status)) { + putstr("# child did not exit\n"); + return 0; + } + + return WEXITSTATUS(child_status) && parent_result; + } +} + +#define run_test(name) \ + if (name()) { \ + tests_passed++; \ + } else { \ + tests_failed++; \ + putstr("not "); \ + } \ + putstr("ok "); \ + putnum(++tests_run); \ + putstr(" " #name "\n"); + +int main(int argc, char **argv) +{ + int ret, i; + + putstr("TAP version 13\n"); + putstr("1.."); + putnum(EXPECTED_TESTS); + putstr("\n"); + + putstr("# PID: "); + putnum(getpid()); + putstr("\n"); + + /* + * This test is run with nolibc which doesn't support hwcap and + * it's probably disproportionate to implement so instead check + * for the default vector length configuration in /proc. + */ + ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0); + if (ret >= 0) { + run_test(fork_test); + + } else { + putstr("# SME support not present\n"); + + for (i = 0; i < EXPECTED_TESTS; i++) { + putstr("ok "); + putnum(i); + putstr(" skipped\n"); + } + + tests_skipped += EXPECTED_TESTS; + } + + print_summary(); + + return 0; +} diff --git a/tools/testing/selftests/arm64/fp/za-ptrace.c b/tools/testing/selftests/arm64/fp/za-ptrace.c new file mode 100644 index 000000000000..bf6158654056 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/za-ptrace.c @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 ARM Limited. 
+ */ +#include <errno.h> +#include <stdbool.h> +#include <stddef.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <sys/auxv.h> +#include <sys/prctl.h> +#include <sys/ptrace.h> +#include <sys/types.h> +#include <sys/uio.h> +#include <sys/wait.h> +#include <asm/sigcontext.h> +#include <asm/ptrace.h> + +#include "../../kselftest.h" + +/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */ +#ifndef NT_ARM_ZA +#define NT_ARM_ZA 0x40c +#endif + +#define EXPECTED_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 3) + +static void fill_buf(char *buf, size_t size) +{ + int i; + + for (i = 0; i < size; i++) + buf[i] = random(); +} + +static int do_child(void) +{ + if (ptrace(PTRACE_TRACEME, -1, NULL, NULL)) + ksft_exit_fail_msg("PTRACE_TRACEME", strerror(errno)); + + if (raise(SIGSTOP)) + ksft_exit_fail_msg("raise(SIGSTOP)", strerror(errno)); + + return EXIT_SUCCESS; +} + +static struct user_za_header *get_za(pid_t pid, void **buf, size_t *size) +{ + struct user_za_header *za; + void *p; + size_t sz = sizeof(*za); + struct iovec iov; + + while (1) { + if (*size < sz) { + p = realloc(*buf, sz); + if (!p) { + errno = ENOMEM; + goto error; + } + + *buf = p; + *size = sz; + } + + iov.iov_base = *buf; + iov.iov_len = sz; + if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov)) + goto error; + + za = *buf; + if (za->size <= sz) + break; + + sz = za->size; + } + + return za; + +error: + return NULL; +} + +static int set_za(pid_t pid, const struct user_za_header *za) +{ + struct iovec iov; + + iov.iov_base = (void *)za; + iov.iov_len = za->size; + return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZA, &iov); +} + +/* Validate attempting to set the specfied VL via ptrace */ +static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported) +{ + struct user_za_header za; + struct user_za_header *new_za = NULL; + size_t new_za_size = 0; + int ret, prctl_vl; + + *supported = false; + + /* Check if the VL is supported in this process */ + prctl_vl = prctl(PR_SME_SET_VL, vl); + if (prctl_vl == -1) + ksft_exit_fail_msg("prctl(PR_SME_SET_VL) failed: %s (%d)\n", + strerror(errno), errno); + + /* If the VL is not supported then a supported VL will be returned */ + *supported = (prctl_vl == vl); + + /* Set the VL by doing a set with no register payload */ + memset(&za, 0, sizeof(za)); + za.size = sizeof(za); + za.vl = vl; + ret = set_za(child, &za); + if (ret != 0) { + ksft_test_result_fail("Failed to set VL %u\n", vl); + return; + } + + /* + * Read back the new register state and verify that we have the + * same VL that we got from prctl() on ourselves. 
+ */ + if (!get_za(child, (void **)&new_za, &new_za_size)) { + ksft_test_result_fail("Failed to read VL %u\n", vl); + return; + } + + ksft_test_result(new_za->vl = prctl_vl, "Set VL %u\n", vl); + + free(new_za); +} + +/* Validate attempting to set no ZA data and read it back */ +static void ptrace_set_no_data(pid_t child, unsigned int vl) +{ + void *read_buf = NULL; + struct user_za_header write_za; + struct user_za_header *read_za; + size_t read_za_size = 0; + int ret; + + /* Set up some data and write it out */ + memset(&write_za, 0, sizeof(write_za)); + write_za.size = ZA_PT_ZA_OFFSET; + write_za.vl = vl; + + ret = set_za(child, &write_za); + if (ret != 0) { + ksft_test_result_fail("Failed to set VL %u no data\n", vl); + return; + } + + /* Read the data back */ + if (!get_za(child, (void **)&read_buf, &read_za_size)) { + ksft_test_result_fail("Failed to read VL %u no data\n", vl); + return; + } + read_za = read_buf; + + /* We might read more data if there's extensions we don't know */ + if (read_za->size < write_za.size) { + ksft_test_result_fail("VL %u wrote %d bytes, only read %d\n", + vl, write_za.size, read_za->size); + goto out_read; + } + + ksft_test_result(read_za->size == write_za.size, + "Disabled ZA for VL %u\n", vl); + +out_read: + free(read_buf); +} + +/* Validate attempting to set data and read it back */ +static void ptrace_set_get_data(pid_t child, unsigned int vl) +{ + void *write_buf; + void *read_buf = NULL; + struct user_za_header *write_za; + struct user_za_header *read_za; + size_t read_za_size = 0; + unsigned int vq = sve_vq_from_vl(vl); + int ret; + size_t data_size; + + data_size = ZA_PT_SIZE(vq); + write_buf = malloc(data_size); + if (!write_buf) { + ksft_test_result_fail("Error allocating %d byte buffer for VL %u\n", + data_size, vl); + return; + } + write_za = write_buf; + + /* Set up some data and write it out */ + memset(write_za, 0, data_size); + write_za->size = data_size; + write_za->vl = vl; + + fill_buf(write_buf + ZA_PT_ZA_OFFSET, ZA_PT_ZA_SIZE(vq)); + + ret = set_za(child, write_za); + if (ret != 0) { + ksft_test_result_fail("Failed to set VL %u data\n", vl); + goto out; + } + + /* Read the data back */ + if (!get_za(child, (void **)&read_buf, &read_za_size)) { + ksft_test_result_fail("Failed to read VL %u data\n", vl); + goto out; + } + read_za = read_buf; + + /* We might read more data if there's extensions we don't know */ + if (read_za->size < write_za->size) { + ksft_test_result_fail("VL %u wrote %d bytes, only read %d\n", + vl, write_za->size, read_za->size); + goto out_read; + } + + ksft_test_result(memcmp(write_buf + ZA_PT_ZA_OFFSET, + read_buf + ZA_PT_ZA_OFFSET, + ZA_PT_ZA_SIZE(vq)) == 0, + "Data match for VL %u\n", vl); + +out_read: + free(read_buf); +out: + free(write_buf); +} + +static int do_parent(pid_t child) +{ + int ret = EXIT_FAILURE; + pid_t pid; + int status; + siginfo_t si; + unsigned int vq, vl; + bool vl_supported; + + /* Attach to the child */ + while (1) { + int sig; + + pid = wait(&status); + if (pid == -1) { + perror("wait"); + goto error; + } + + /* + * This should never happen but it's hard to flag in + * the framework. 
+ */ + if (pid != child) + continue; + + if (WIFEXITED(status) || WIFSIGNALED(status)) + ksft_exit_fail_msg("Child died unexpectedly\n"); + + if (!WIFSTOPPED(status)) + goto error; + + sig = WSTOPSIG(status); + + if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) { + if (errno == ESRCH) + goto disappeared; + + if (errno == EINVAL) { + sig = 0; /* bust group-stop */ + goto cont; + } + + ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n", + strerror(errno)); + goto error; + } + + if (sig == SIGSTOP && si.si_code == SI_TKILL && + si.si_pid == pid) + break; + + cont: + if (ptrace(PTRACE_CONT, pid, NULL, sig)) { + if (errno == ESRCH) + goto disappeared; + + ksft_test_result_fail("PTRACE_CONT: %s\n", + strerror(errno)); + goto error; + } + } + + ksft_print_msg("Parent is %d, child is %d\n", getpid(), child); + + /* Step through every possible VQ */ + for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) { + vl = sve_vl_from_vq(vq); + + /* First, try to set this vector length */ + ptrace_set_get_vl(child, vl, &vl_supported); + + /* If the VL is supported validate data set/get */ + if (vl_supported) { + ptrace_set_no_data(child, vl); + ptrace_set_get_data(child, vl); + } else { + ksft_test_result_skip("Disabled ZA for VL %u\n", vl); + ksft_test_result_skip("Get and set data for VL %u\n", + vl); + } + } + + ret = EXIT_SUCCESS; + +error: + kill(child, SIGKILL); + +disappeared: + return ret; +} + +int main(void) +{ + int ret = EXIT_SUCCESS; + pid_t child; + + srandom(getpid()); + + ksft_print_header(); + + if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) { + ksft_set_plan(1); + ksft_exit_skip("SME not available\n"); + } + + ksft_set_plan(EXPECTED_TESTS); + + child = fork(); + if (!child) + return do_child(); + + if (do_parent(child)) + ret = EXIT_FAILURE; + + ksft_print_cnts(); + + return ret; +} diff --git a/tools/testing/selftests/arm64/fp/za-stress b/tools/testing/selftests/arm64/fp/za-stress new file mode 100644 index 000000000000..5ac386b55b95 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/za-stress @@ -0,0 +1,59 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2015-2019 ARM Limited. +# Original author: Dave Martin <Dave.Martin@arm.com> + +set -ue + +NR_CPUS=`nproc` + +pids= +logs= + +cleanup () { + trap - INT TERM CHLD + set +e + + if [ -n "$pids" ]; then + kill $pids + wait $pids + pids= + fi + + if [ -n "$logs" ]; then + cat $logs + rm $logs + logs= + fi +} + +interrupt () { + cleanup + exit 0 +} + +child_died () { + cleanup + exit 1 +} + +trap interrupt INT TERM EXIT + +for x in `seq 0 $((NR_CPUS * 4))`; do + log=`mktemp` + logs=$logs\ $log + ./za-test >$log & + pids=$pids\ $! +done + +# Wait for all child processes to be created: +sleep 10 + +while :; do + kill -USR1 $pids +done & +pids=$pids\ $! + +wait + +exit 1 diff --git a/tools/testing/selftests/arm64/fp/za-test.S b/tools/testing/selftests/arm64/fp/za-test.S new file mode 100644 index 000000000000..9ab6f9cd9623 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/za-test.S @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2021 ARM Limited. +// Original author: Mark Brown <broonie@kernel.org> +// +// Scalable Matrix Extension ZA context switch test +// Repeatedly writes unique test patterns into each ZA tile +// and reads them back to verify integrity. +// +// for x in `seq 1 NR_CPUS`; do sve-test & pids=$pids\ $! ; done +// (leave it running for as long as you want...) 
+// kill $pids + +#include <asm/unistd.h> +#include "assembler.h" +#include "asm-offsets.h" +#include "sme-inst.h" + +.arch_extension sve + +#define MAXVL 2048 +#define MAXVL_B (MAXVL / 8) + +// Declare some storage space to shadow ZA register contents and a +// scratch buffer for a vector. +.pushsection .text +.data +.align 4 +zaref: + .space MAXVL_B * MAXVL_B +scratch: + .space MAXVL_B +.popsection + +// Trivial memory copy: copy x2 bytes, starting at address x1, to address x0. +// Clobbers x0-x3 +function memcpy + cmp x2, #0 + b.eq 1f +0: ldrb w3, [x1], #1 + strb w3, [x0], #1 + subs x2, x2, #1 + b.ne 0b +1: ret +endfunction + +// Generate a test pattern for storage in ZA +// x0: pid +// x1: row in ZA +// x2: generation + +// These values are used to constuct a 32-bit pattern that is repeated in the +// scratch buffer as many times as will fit: +// bits 31:28 generation number (increments once per test_loop) +// bits 27:16 pid +// bits 15: 8 row number +// bits 7: 0 32-bit lane index + +function pattern + mov w3, wzr + bfi w3, w0, #16, #12 // PID + bfi w3, w1, #8, #8 // Row + bfi w3, w2, #28, #4 // Generation + + ldr x0, =scratch + mov w1, #MAXVL_B / 4 + +0: str w3, [x0], #4 + add w3, w3, #1 // Lane + subs w1, w1, #1 + b.ne 0b + + ret +endfunction + +// Get the address of shadow data for ZA horizontal vector xn +.macro _adrza xd, xn, nrtmp + ldr \xd, =zaref + rdsvl \nrtmp, 1 + madd \xd, x\nrtmp, \xn, \xd +.endm + +// Set up test pattern in a ZA horizontal vector +// x0: pid +// x1: row number +// x2: generation +function setup_za + mov x4, x30 + mov x12, x1 // Use x12 for vector select + + bl pattern // Get pattern in scratch buffer + _adrza x0, x12, 2 // Shadow buffer pointer to x0 and x5 + mov x5, x0 + ldr x1, =scratch + bl memcpy // length set up in x2 by _adrza + + _ldr_za 12, 5 // load vector w12 from pointer x5 + + ret x4 +endfunction + +// Trivial memory compare: compare x2 bytes starting at address x0 with +// bytes starting at address x1. +// Returns only if all bytes match; otherwise, the program is aborted. +// Clobbers x0-x5. +function memcmp + cbz x2, 2f + + stp x0, x1, [sp, #-0x20]! + str x2, [sp, #0x10] + + mov x5, #0 +0: ldrb w3, [x0, x5] + ldrb w4, [x1, x5] + add x5, x5, #1 + cmp w3, w4 + b.ne 1f + subs x2, x2, #1 + b.ne 0b + +1: ldr x2, [sp, #0x10] + ldp x0, x1, [sp], #0x20 + b.ne barf + +2: ret +endfunction + +// Verify that a ZA vector matches its shadow in memory, else abort +// x0: row number +// Clobbers x0-x7 and x12. +function check_za + mov x3, x30 + + mov x12, x0 + _adrza x5, x0, 6 // pointer to expected value in x5 + mov x4, x0 + ldr x7, =scratch // x7 is scratch + + mov x0, x7 // Poison scratch + mov x1, x6 + bl memfill_ae + + _str_za 12, 7 // save vector w12 to pointer x7 + + mov x0, x5 + mov x1, x7 + mov x2, x6 + mov x30, x3 + b memcmp +endfunction + +// Any SME register modified here can cause corruption in the main +// thread -- but *only* the locations modified here. 
+function irritator_handler + // Increment the irritation signal count (x23): + ldr x0, [x2, #ucontext_regs + 8 * 23] + add x0, x0, #1 + str x0, [x2, #ucontext_regs + 8 * 23] + + // Corrupt some random ZA data +#if 0 + adr x0, .text + (irritator_handler - .text) / 16 * 16 + movi v0.8b, #1 + movi v9.16b, #2 + movi v31.8b, #3 +#endif + + ret +endfunction + +function terminate_handler + mov w21, w0 + mov x20, x2 + + puts "Terminated by signal " + mov w0, w21 + bl putdec + puts ", no error, iterations=" + ldr x0, [x20, #ucontext_regs + 8 * 22] + bl putdec + puts ", signals=" + ldr x0, [x20, #ucontext_regs + 8 * 23] + bl putdecn + + mov x0, #0 + mov x8, #__NR_exit + svc #0 +endfunction + +// w0: signal number +// x1: sa_action +// w2: sa_flags +// Clobbers x0-x6,x8 +function setsignal + str x30, [sp, #-((sa_sz + 15) / 16 * 16 + 16)]! + + mov w4, w0 + mov x5, x1 + mov w6, w2 + + add x0, sp, #16 + mov x1, #sa_sz + bl memclr + + mov w0, w4 + add x1, sp, #16 + str w6, [x1, #sa_flags] + str x5, [x1, #sa_handler] + mov x2, #0 + mov x3, #sa_mask_sz + mov x8, #__NR_rt_sigaction + svc #0 + + cbz w0, 1f + + puts "sigaction failure\n" + b .Labort + +1: ldr x30, [sp], #((sa_sz + 15) / 16 * 16 + 16) + ret +endfunction + +// Main program entry point +.globl _start +function _start +_start: + puts "Streaming mode " + smstart_za + + // Sanity-check and report the vector length + + rdsvl 19, 8 + cmp x19, #128 + b.lo 1f + cmp x19, #2048 + b.hi 1f + tst x19, #(8 - 1) + b.eq 2f + +1: puts "bad vector length: " + mov x0, x19 + bl putdecn + b .Labort + +2: puts "vector length:\t" + mov x0, x19 + bl putdec + puts " bits\n" + + // Obtain our PID, to ensure test pattern uniqueness between processes + mov x8, #__NR_getpid + svc #0 + mov x20, x0 + + puts "PID:\t" + mov x0, x20 + bl putdecn + + mov x23, #0 // Irritation signal count + + mov w0, #SIGINT + adr x1, terminate_handler + mov w2, #SA_SIGINFO + bl setsignal + + mov w0, #SIGTERM + adr x1, terminate_handler + mov w2, #SA_SIGINFO + bl setsignal + + mov w0, #SIGUSR1 + adr x1, irritator_handler + mov w2, #SA_SIGINFO + orr w2, w2, #SA_NODEFER + bl setsignal + + mov x22, #0 // generation number, increments per iteration +.Ltest_loop: + rdsvl 0, 8 + cmp x0, x19 + b.ne vl_barf + + rdsvl 21, 1 // Set up ZA & shadow with test pattern +0: mov x0, x20 + sub x1, x21, #1 + mov x2, x22 + bl setup_za + subs x21, x21, #1 + b.ne 0b + + and x8, x22, #127 // Every 128 interations... 
+ cbz x8, 0f + mov x8, #__NR_getpid // (otherwise minimal syscall) + b 1f +0: + mov x8, #__NR_sched_yield // ...encourage preemption +1: + svc #0 + + mrs x0, S3_3_C4_C2_2 // SVCR should have ZA=1,SM=0 + and x1, x0, #3 + cmp x1, #2 + b.ne svcr_barf + + rdsvl 21, 1 // Verify that the data made it through + rdsvl 24, 1 // Verify that the data made it through +0: sub x0, x24, x21 + bl check_za + subs x21, x21, #1 + bne 0b + + add x22, x22, #1 // Everything still working + b .Ltest_loop + +.Labort: + mov x0, #0 + mov x1, #SIGABRT + mov x8, #__NR_kill + svc #0 +endfunction + +function barf +// fpsimd.c acitivty log dump hack +// ldr w0, =0xdeadc0de +// mov w8, #__NR_exit +// svc #0 +// end hack + smstop + mov x10, x0 // expected data + mov x11, x1 // actual data + mov x12, x2 // data size + + puts "Mismatch: PID=" + mov x0, x20 + bl putdec + puts ", iteration=" + mov x0, x22 + bl putdec + puts ", row=" + mov x0, x21 + bl putdecn + puts "\tExpected [" + mov x0, x10 + mov x1, x12 + bl dumphex + puts "]\n\tGot [" + mov x0, x11 + mov x1, x12 + bl dumphex + puts "]\n" + + mov x8, #__NR_getpid + svc #0 +// fpsimd.c acitivty log dump hack +// ldr w0, =0xdeadc0de +// mov w8, #__NR_exit +// svc #0 +// ^ end of hack + mov x1, #SIGABRT + mov x8, #__NR_kill + svc #0 +// mov x8, #__NR_exit +// mov x1, #1 +// svc #0 +endfunction + +function vl_barf + mov x10, x0 + + puts "Bad active VL: " + mov x0, x10 + bl putdecn + + mov x8, #__NR_exit + mov x1, #1 + svc #0 +endfunction + +function svcr_barf + mov x10, x0 + + puts "Bad SVCR: " + mov x0, x10 + bl putdecn + + mov x8, #__NR_exit + mov x1, #1 + svc #0 +endfunction diff --git a/tools/testing/selftests/arm64/mte/.gitignore b/tools/testing/selftests/arm64/mte/.gitignore index d1fe4ddf1669..052d0f9f92b3 100644 --- a/tools/testing/selftests/arm64/mte/.gitignore +++ b/tools/testing/selftests/arm64/mte/.gitignore @@ -3,5 +3,6 @@ check_gcr_el1_cswitch check_tags_inclusion check_child_memory check_mmap_options +check_prctl check_ksm_options check_user_mem diff --git a/tools/testing/selftests/arm64/mte/check_child_memory.c b/tools/testing/selftests/arm64/mte/check_child_memory.c index 43bd94f853ba..7597fc632cad 100644 --- a/tools/testing/selftests/arm64/mte/check_child_memory.c +++ b/tools/testing/selftests/arm64/mte/check_child_memory.c @@ -85,9 +85,9 @@ static int check_child_memory_mapping(int mem_type, int mode, int mapping) { char *ptr; int run, result; - int item = sizeof(sizes)/sizeof(int); + int item = ARRAY_SIZE(sizes); - item = sizeof(sizes)/sizeof(int); + item = ARRAY_SIZE(sizes); mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG); for (run = 0; run < item; run++) { ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping, @@ -107,7 +107,7 @@ static int check_child_file_mapping(int mem_type, int mode, int mapping) { char *ptr, *map_ptr; int run, fd, map_size, result = KSFT_PASS; - int total = sizeof(sizes)/sizeof(int); + int total = ARRAY_SIZE(sizes); mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG); for (run = 0; run < total; run++) { @@ -144,7 +144,7 @@ static int check_child_file_mapping(int mem_type, int mode, int mapping) int main(int argc, char *argv[]) { int err; - int item = sizeof(sizes)/sizeof(int); + int item = ARRAY_SIZE(sizes); page_size = getpagesize(); if (!page_size) { diff --git a/tools/testing/selftests/arm64/mte/check_prctl.c b/tools/testing/selftests/arm64/mte/check_prctl.c new file mode 100644 index 000000000000..f139a33a43ef --- /dev/null +++ b/tools/testing/selftests/arm64/mte/check_prctl.c @@ -0,0 +1,119 @@ +// 
SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022 ARM Limited + +#include <stdbool.h> +#include <stdio.h> +#include <string.h> + +#include <sys/auxv.h> +#include <sys/prctl.h> + +#include <asm/hwcap.h> + +#include "kselftest.h" + +static int set_tagged_addr_ctrl(int val) +{ + int ret; + + ret = prctl(PR_SET_TAGGED_ADDR_CTRL, val, 0, 0, 0); + if (ret < 0) + ksft_print_msg("PR_SET_TAGGED_ADDR_CTRL: failed %d %d (%s)\n", + ret, errno, strerror(errno)); + return ret; +} + +static int get_tagged_addr_ctrl(void) +{ + int ret; + + ret = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0); + if (ret < 0) + ksft_print_msg("PR_GET_TAGGED_ADDR_CTRL failed: %d %d (%s)\n", + ret, errno, strerror(errno)); + return ret; +} + +/* + * Read the current mode without having done any configuration, should + * run first. + */ +void check_basic_read(void) +{ + int ret; + + ret = get_tagged_addr_ctrl(); + if (ret < 0) { + ksft_test_result_fail("check_basic_read\n"); + return; + } + + if (ret & PR_MTE_TCF_SYNC) + ksft_print_msg("SYNC enabled\n"); + if (ret & PR_MTE_TCF_ASYNC) + ksft_print_msg("ASYNC enabled\n"); + + /* Any configuration is valid */ + ksft_test_result_pass("check_basic_read\n"); +} + +/* + * Attempt to set a specified combination of modes. + */ +void set_mode_test(const char *name, int hwcap2, int mask) +{ + int ret; + + if ((getauxval(AT_HWCAP2) & hwcap2) != hwcap2) { + ksft_test_result_skip("%s\n", name); + return; + } + + ret = set_tagged_addr_ctrl(mask); + if (ret < 0) { + ksft_test_result_fail("%s\n", name); + return; + } + + ret = get_tagged_addr_ctrl(); + if (ret < 0) { + ksft_test_result_fail("%s\n", name); + return; + } + + if ((ret & PR_MTE_TCF_MASK) == mask) { + ksft_test_result_pass("%s\n", name); + } else { + ksft_print_msg("Got %x, expected %x\n", + (ret & PR_MTE_TCF_MASK), mask); + ksft_test_result_fail("%s\n", name); + } +} + +struct mte_mode { + int mask; + int hwcap2; + const char *name; +} mte_modes[] = { + { PR_MTE_TCF_NONE, 0, "NONE" }, + { PR_MTE_TCF_SYNC, HWCAP2_MTE, "SYNC" }, + { PR_MTE_TCF_ASYNC, HWCAP2_MTE, "ASYNC" }, + { PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC, HWCAP2_MTE, "SYNC+ASYNC" }, +}; + +int main(void) +{ + int i; + + ksft_print_header(); + ksft_set_plan(5); + + check_basic_read(); + for (i = 0; i < ARRAY_SIZE(mte_modes); i++) + set_mode_test(mte_modes[i].name, mte_modes[i].hwcap2, + mte_modes[i].mask); + + ksft_print_cnts(); + + return 0; +} diff --git a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c index deaef1f61076..2b1425b92b69 100644 --- a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c +++ b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c @@ -23,10 +23,13 @@ static int verify_mte_pointer_validity(char *ptr, int mode) { mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE); /* Check the validity of the tagged pointer */ - memset((void *)ptr, '1', BUFFER_SIZE); + memset(ptr, '1', BUFFER_SIZE); mte_wait_after_trig(); - if (cur_mte_cxt.fault_valid) + if (cur_mte_cxt.fault_valid) { + ksft_print_msg("Unexpected fault recorded for %p-%p in mode %x\n", + ptr, ptr + BUFFER_SIZE, mode); return KSFT_FAIL; + } /* Proceed further for nonzero tags */ if (!MT_FETCH_TAG((uintptr_t)ptr)) return KSFT_PASS; @@ -34,27 +37,32 @@ static int verify_mte_pointer_validity(char *ptr, int mode) /* Check the validity outside the range */ ptr[BUFFER_SIZE] = '2'; mte_wait_after_trig(); - if (!cur_mte_cxt.fault_valid) + if (!cur_mte_cxt.fault_valid) { + ksft_print_msg("No valid fault recorded for 
%p in mode %x\n", + ptr, mode); return KSFT_FAIL; - else + } else { return KSFT_PASS; + } } static int check_single_included_tags(int mem_type, int mode) { char *ptr; - int tag, run, result = KSFT_PASS; + int tag, run, ret, result = KSFT_PASS; - ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); + ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, false) != KSFT_PASS) return KSFT_FAIL; for (tag = 0; (tag < MT_TAG_COUNT) && (result == KSFT_PASS); tag++) { - mte_switch_mode(mode, MT_INCLUDE_VALID_TAG(tag)); + ret = mte_switch_mode(mode, MT_INCLUDE_VALID_TAG(tag)); + if (ret != 0) + result = KSFT_FAIL; /* Try to catch a excluded tag by a number of tries. */ for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) { - ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE); + ptr = mte_insert_tags(ptr, BUFFER_SIZE); /* Check tag value */ if (MT_FETCH_TAG((uintptr_t)ptr) == tag) { ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n", @@ -66,7 +74,7 @@ static int check_single_included_tags(int mem_type, int mode) result = verify_mte_pointer_validity(ptr, mode); } } - mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); + mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); return result; } @@ -76,7 +84,7 @@ static int check_multiple_included_tags(int mem_type, int mode) int tag, run, result = KSFT_PASS; unsigned long excl_mask = 0; - ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); + ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, false) != KSFT_PASS) return KSFT_FAIL; @@ -86,7 +94,7 @@ static int check_multiple_included_tags(int mem_type, int mode) mte_switch_mode(mode, MT_INCLUDE_VALID_TAGS(excl_mask)); /* Try to catch a excluded tag by a number of tries. */ for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) { - ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE); + ptr = mte_insert_tags(ptr, BUFFER_SIZE); /* Check tag value */ if (MT_FETCH_TAG((uintptr_t)ptr) < tag) { ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n", @@ -98,21 +106,23 @@ static int check_multiple_included_tags(int mem_type, int mode) result = verify_mte_pointer_validity(ptr, mode); } } - mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); + mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); return result; } static int check_all_included_tags(int mem_type, int mode) { char *ptr; - int run, result = KSFT_PASS; + int run, ret, result = KSFT_PASS; - ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); + ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, false) != KSFT_PASS) return KSFT_FAIL; - mte_switch_mode(mode, MT_INCLUDE_TAG_MASK); + ret = mte_switch_mode(mode, MT_INCLUDE_TAG_MASK); + if (ret != 0) + return KSFT_FAIL; /* Try to catch a excluded tag by a number of tries. 
*/ for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) { ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE); @@ -122,20 +132,22 @@ static int check_all_included_tags(int mem_type, int mode) */ result = verify_mte_pointer_validity(ptr, mode); } - mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); + mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); return result; } static int check_none_included_tags(int mem_type, int mode) { char *ptr; - int run; + int run, ret; - ptr = (char *)mte_allocate_memory(BUFFER_SIZE, mem_type, 0, false); + ptr = mte_allocate_memory(BUFFER_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE, mem_type, false) != KSFT_PASS) return KSFT_FAIL; - mte_switch_mode(mode, MT_EXCLUDE_TAG_MASK); + ret = mte_switch_mode(mode, MT_EXCLUDE_TAG_MASK); + if (ret != 0) + return KSFT_FAIL; /* Try to catch a excluded tag by a number of tries. */ for (run = 0; run < RUNS; run++) { ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE); @@ -147,12 +159,12 @@ static int check_none_included_tags(int mem_type, int mode) } mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE); /* Check the write validity of the untagged pointer */ - memset((void *)ptr, '1', BUFFER_SIZE); + memset(ptr, '1', BUFFER_SIZE); mte_wait_after_trig(); if (cur_mte_cxt.fault_valid) break; } - mte_free_memory((void *)ptr, BUFFER_SIZE, mem_type, false); + mte_free_memory(ptr, BUFFER_SIZE, mem_type, false); if (cur_mte_cxt.fault_valid) return KSFT_FAIL; else diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c index 0328a1e08f65..00ffd34c66d3 100644 --- a/tools/testing/selftests/arm64/mte/mte_common_util.c +++ b/tools/testing/selftests/arm64/mte/mte_common_util.c @@ -37,6 +37,10 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc) if (si->si_code == SEGV_MTEAERR) { if (cur_mte_cxt.trig_si_code == si->si_code) cur_mte_cxt.fault_valid = true; + else + ksft_print_msg("Got unexpected SEGV_MTEAERR at pc=$lx, fault addr=%lx\n", + ((ucontext_t *)uc)->uc_mcontext.pc, + addr); return; } /* Compare the context for precise error */ @@ -124,13 +128,16 @@ static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping, int prot_flag, map_flag; size_t entire_size = size + range_before + range_after; - if (mem_type != USE_MALLOC && mem_type != USE_MMAP && - mem_type != USE_MPROTECT) { + switch (mem_type) { + case USE_MALLOC: + return malloc(entire_size) + range_before; + case USE_MMAP: + case USE_MPROTECT: + break; + default: ksft_print_msg("FAIL: Invalid allocate request\n"); return NULL; } - if (mem_type == USE_MALLOC) - return malloc(entire_size) + range_before; prot_flag = PROT_READ | PROT_WRITE; if (mem_type == USE_MMAP) @@ -269,18 +276,33 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask) { unsigned long en = 0; - if (!(mte_option == MTE_SYNC_ERR || mte_option == MTE_ASYNC_ERR || - mte_option == MTE_NONE_ERR || incl_mask <= MTE_ALLOW_NON_ZERO_TAG)) { - ksft_print_msg("FAIL: Invalid mte config option\n"); + switch (mte_option) { + case MTE_NONE_ERR: + case MTE_SYNC_ERR: + case MTE_ASYNC_ERR: + break; + default: + ksft_print_msg("FAIL: Invalid MTE option %x\n", mte_option); + return -EINVAL; + } + + if (incl_mask & ~MT_INCLUDE_TAG_MASK) { + ksft_print_msg("FAIL: Invalid incl_mask %lx\n", incl_mask); return -EINVAL; } + en = PR_TAGGED_ADDR_ENABLE; - if (mte_option == MTE_SYNC_ERR) + switch (mte_option) { + case MTE_SYNC_ERR: en |= 
PR_MTE_TCF_SYNC; - else if (mte_option == MTE_ASYNC_ERR) + break; + case MTE_ASYNC_ERR: en |= PR_MTE_TCF_ASYNC; - else if (mte_option == MTE_NONE_ERR) + break; + case MTE_NONE_ERR: en |= PR_MTE_TCF_NONE; + break; + } en |= (incl_mask << PR_MTE_TAG_SHIFT); /* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */ diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.h b/tools/testing/selftests/arm64/mte/mte_common_util.h index 195a7d1879e6..2d3e71724e55 100644 --- a/tools/testing/selftests/arm64/mte/mte_common_util.h +++ b/tools/testing/selftests/arm64/mte/mte_common_util.h @@ -75,10 +75,21 @@ unsigned int mte_get_pstate_tco(void); /* Test framework static inline functions/macros */ static inline void evaluate_test(int err, const char *msg) { - if (err == KSFT_PASS) + switch (err) { + case KSFT_PASS: ksft_test_result_pass(msg); - else if (err == KSFT_FAIL) + break; + case KSFT_FAIL: ksft_test_result_fail(msg); + break; + case KSFT_SKIP: + ksft_test_result_skip(msg); + break; + default: + ksft_test_result_error("Unknown return code %d from %s", + err, msg); + break; + } } static inline int check_allocated_memory(void *ptr, size_t size, diff --git a/tools/testing/selftests/arm64/signal/.gitignore b/tools/testing/selftests/arm64/signal/.gitignore index c1742755abb9..e8d2b57f73ec 100644 --- a/tools/testing/selftests/arm64/signal/.gitignore +++ b/tools/testing/selftests/arm64/signal/.gitignore @@ -1,5 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only mangle_* fake_sigreturn_* +sme_* +ssve_* sve_* +za_* !*.[ch] diff --git a/tools/testing/selftests/arm64/signal/test_signals.h b/tools/testing/selftests/arm64/signal/test_signals.h index f909b70d9e98..c70fdec7d7c4 100644 --- a/tools/testing/selftests/arm64/signal/test_signals.h +++ b/tools/testing/selftests/arm64/signal/test_signals.h @@ -34,11 +34,15 @@ enum { FSSBS_BIT, FSVE_BIT, + FSME_BIT, + FSME_FA64_BIT, FMAX_END }; #define FEAT_SSBS (1UL << FSSBS_BIT) #define FEAT_SVE (1UL << FSVE_BIT) +#define FEAT_SME (1UL << FSME_BIT) +#define FEAT_SME_FA64 (1UL << FSME_FA64_BIT) /* * A descriptor used to describe and configure a test case. 
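The FEAT_SME and FEAT_SME_FA64 bits introduced above are filled in from the ELF hwcaps; stripped of the harness plumbing, the probe amounts to something like the sketch below. The helper names are illustrative, and it assumes a uapi <asm/hwcap.h> recent enough to define HWCAP2_SME and HWCAP2_SME_FA64.

#include <stdbool.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

/* Report whether the kernel advertises SME, and the FA64 extension that
 * permits the full A64 instruction set in streaming mode. */
static bool have_sme(void)
{
        return getauxval(AT_HWCAP2) & HWCAP2_SME;
}

static bool have_sme_fa64(void)
{
        return getauxval(AT_HWCAP2) & HWCAP2_SME_FA64;
}

Tests such as the streaming SVE register test later in the patch declare FEAT_SME_FA64 in feats_required for exactly this reason: helpers like memset() may use instructions that are illegal in streaming mode without FA64.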
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c index 5743897984b0..b588d10afd5b 100644 --- a/tools/testing/selftests/arm64/signal/test_signals_utils.c +++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c @@ -27,6 +27,8 @@ static int sig_copyctx = SIGTRAP; static char const *const feats_names[FMAX_END] = { " SSBS ", " SVE ", + " SME ", + " FA64 ", }; #define MAX_FEATS_SZ 128 @@ -268,6 +270,10 @@ int test_init(struct tdescr *td) td->feats_supported |= FEAT_SSBS; if (getauxval(AT_HWCAP) & HWCAP_SVE) td->feats_supported |= FEAT_SVE; + if (getauxval(AT_HWCAP2) & HWCAP2_SME) + td->feats_supported |= FEAT_SME; + if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64) + td->feats_supported |= FEAT_SME_FA64; if (feats_ok(td)) { if (td->feats_required & td->feats_supported) fprintf(stderr, diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c new file mode 100644 index 000000000000..7ed762b7202f --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Attempt to change the streaming SVE vector length in a signal + * handler, this is not supported and is expected to segfault. + */ + +#include <signal.h> +#include <ucontext.h> +#include <sys/prctl.h> + +#include "test_signals_utils.h" +#include "testcases.h" + +struct fake_sigframe sf; +static unsigned int vls[SVE_VQ_MAX]; +unsigned int nvls = 0; + +static bool sme_get_vls(struct tdescr *td) +{ + int vq, vl; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(PR_SVE_SET_VL, vq * 16); + if (vl == -1) + return false; + + vl &= PR_SME_VL_LEN_MASK; + + /* Skip missing VLs */ + vq = sve_vq_from_vl(vl); + + vls[nvls++] = vl; + } + + /* We need at least two VLs */ + if (nvls < 2) { + fprintf(stderr, "Only %d VL supported\n", nvls); + return false; + } + + return true; +} + +static int fake_sigreturn_ssve_change_vl(struct tdescr *td, + siginfo_t *si, ucontext_t *uc) +{ + size_t resv_sz, offset; + struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf); + struct sve_context *sve; + + /* Get a signal context with a SME ZA frame in it */ + if (!get_current_context(td, &sf.uc)) + return 1; + + resv_sz = GET_SF_RESV_SIZE(sf); + head = get_header(head, SVE_MAGIC, resv_sz, &offset); + if (!head) { + fprintf(stderr, "No SVE context\n"); + return 1; + } + + if (head->size != sizeof(struct sve_context)) { + fprintf(stderr, "Register data present, aborting\n"); + return 1; + } + + sve = (struct sve_context *)head; + + /* No changes are supported; init left us at minimum VL so go to max */ + fprintf(stderr, "Attempting to change VL from %d to %d\n", + sve->vl, vls[0]); + sve->vl = vls[0]; + + fake_sigreturn(&sf, sizeof(sf), 0); + + return 1; +} + +struct tdescr tde = { + .name = "FAKE_SIGRETURN_SSVE_CHANGE", + .descr = "Attempt to change Streaming SVE VL", + .feats_required = FEAT_SME, + .sig_ok = SIGSEGV, + .timeout = 3, + .init = sme_get_vls, + .run = fake_sigreturn_ssve_change_vl, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c b/tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c new file mode 100644 index 000000000000..f9d76ae32bba --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c @@ -0,0 +1,38 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that using a streaming mode instruction without enabling it + * generates a SIGILL. + */ + +#include <signal.h> +#include <ucontext.h> +#include <sys/prctl.h> + +#include "test_signals_utils.h" +#include "testcases.h" + +int sme_trap_no_sm_trigger(struct tdescr *td) +{ + /* SMSTART ZA ; ADDHA ZA0.S, P0/M, P0/M, Z0.S */ + asm volatile(".inst 0xd503457f ; .inst 0xc0900000"); + + return 0; +} + +int sme_trap_no_sm_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + return 1; +} + +struct tdescr tde = { + .name = "SME trap without SM", + .descr = "Check that we get a SIGILL if we use streaming mode without enabling it", + .timeout = 3, + .feats_required = FEAT_SME, /* We need a SMSTART ZA */ + .sanity_disabled = true, + .trigger = sme_trap_no_sm_trigger, + .run = sme_trap_no_sm_run, + .sig_ok = SIGILL, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c b/tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c new file mode 100644 index 000000000000..e469ae5348e3 --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that using an instruction not supported in streaming mode + * traps when in streaming mode. + */ + +#include <signal.h> +#include <ucontext.h> +#include <sys/prctl.h> + +#include "test_signals_utils.h" +#include "testcases.h" + +int sme_trap_non_streaming_trigger(struct tdescr *td) +{ + /* + * The framework will handle SIGILL so we need to exit SM to + * stop any other code triggering a further SIGILL down the + * line from using a streaming-illegal instruction. + */ + asm volatile(".inst 0xd503437f; /* SMSTART ZA */ \ + cnt v0.16b, v0.16b; \ + .inst 0xd503447f /* SMSTOP ZA */"); + + return 0; +} + +int sme_trap_non_streaming_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + return 1; +} + +struct tdescr tde = { + .name = "SME SM trap unsupported instruction", + .descr = "Check that we get a SIGILL if we use an unsupported instruction in streaming mode", + .feats_required = FEAT_SME, + .feats_incompatible = FEAT_SME_FA64, + .timeout = 3, + .sanity_disabled = true, + .trigger = sme_trap_non_streaming_trigger, + .run = sme_trap_non_streaming_run, + .sig_ok = SIGILL, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c b/tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c new file mode 100644 index 000000000000..3a7747af4715 --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that accessing ZA without enabling it generates a SIGILL. 
+ */ + +#include <signal.h> +#include <ucontext.h> +#include <sys/prctl.h> + +#include "test_signals_utils.h" +#include "testcases.h" + +int sme_trap_za_trigger(struct tdescr *td) +{ + /* ZERO ZA */ + asm volatile(".inst 0xc00800ff"); + + return 0; +} + +int sme_trap_za_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + return 1; +} + +struct tdescr tde = { + .name = "SME ZA trap", + .descr = "Check that we get a SIGILL if we access ZA without enabling", + .timeout = 3, + .sanity_disabled = true, + .trigger = sme_trap_za_trigger, + .run = sme_trap_za_run, + .sig_ok = SIGILL, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_vl.c b/tools/testing/selftests/arm64/signal/testcases/sme_vl.c new file mode 100644 index 000000000000..13ff3b35cbaf --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/sme_vl.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Check that the SME vector length reported in signal contexts is the + * expected one. + */ + +#include <signal.h> +#include <ucontext.h> +#include <sys/prctl.h> + +#include "test_signals_utils.h" +#include "testcases.h" + +struct fake_sigframe sf; +unsigned int vl; + +static bool get_sme_vl(struct tdescr *td) +{ + int ret = prctl(PR_SME_GET_VL); + if (ret == -1) + return false; + + vl = ret; + + return true; +} + +static int sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + size_t resv_sz, offset; + struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf); + struct za_context *za; + + /* Get a signal context which should have a ZA frame in it */ + if (!get_current_context(td, &sf.uc)) + return 1; + + resv_sz = GET_SF_RESV_SIZE(sf); + head = get_header(head, ZA_MAGIC, resv_sz, &offset); + if (!head) { + fprintf(stderr, "No ZA context\n"); + return 1; + } + za = (struct za_context *)head; + + if (za->vl != vl) { + fprintf(stderr, "ZA sigframe VL %u, expected %u\n", + za->vl, vl); + return 1; + } else { + fprintf(stderr, "got expected VL %u\n", vl); + } + + td->pass = 1; + + return 0; +} + +struct tdescr tde = { + .name = "SME VL", + .descr = "Check that we get the right SME VL reported", + .feats_required = FEAT_SME, + .timeout = 3, + .init = get_sme_vl, + .run = sme_vl, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c new file mode 100644 index 000000000000..9022a6cab4b3 --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that the streaming SVE register context in signal frames is + * set up as expected. 
+ */ + +#include <signal.h> +#include <ucontext.h> +#include <sys/prctl.h> + +#include "test_signals_utils.h" +#include "testcases.h" + +struct fake_sigframe sf; +static unsigned int vls[SVE_VQ_MAX]; +unsigned int nvls = 0; + +static bool sme_get_vls(struct tdescr *td) +{ + int vq, vl; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(PR_SME_SET_VL, vq * 16); + if (vl == -1) + return false; + + vl &= PR_SME_VL_LEN_MASK; + + /* Skip missing VLs */ + vq = sve_vq_from_vl(vl); + + vls[nvls++] = vl; + } + + /* We need at least one VL */ + if (nvls < 1) { + fprintf(stderr, "Only %d VL supported\n", nvls); + return false; + } + + return true; +} + +static void setup_ssve_regs(void) +{ + /* smstart sm; real data is TODO */ + asm volatile(".inst 0xd503437f" : : : ); +} + +static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc, + unsigned int vl) +{ + size_t resv_sz, offset; + struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf); + struct sve_context *ssve; + int ret; + + fprintf(stderr, "Testing VL %d\n", vl); + + ret = prctl(PR_SME_SET_VL, vl); + if (ret != vl) { + fprintf(stderr, "Failed to set VL, got %d\n", ret); + return 1; + } + + /* + * Get a signal context which should have a SVE frame and registers + * in it. + */ + setup_ssve_regs(); + if (!get_current_context(td, &sf.uc)) + return 1; + + resv_sz = GET_SF_RESV_SIZE(sf); + head = get_header(head, SVE_MAGIC, resv_sz, &offset); + if (!head) { + fprintf(stderr, "No SVE context\n"); + return 1; + } + + ssve = (struct sve_context *)head; + if (ssve->vl != vl) { + fprintf(stderr, "Got VL %d, expected %d\n", ssve->vl, vl); + return 1; + } + + /* The actual size validation is done in get_current_context() */ + fprintf(stderr, "Got expected size %u and VL %d\n", + head->size, ssve->vl); + + return 0; +} + +static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + int i; + + for (i = 0; i < nvls; i++) { + /* + * TODO: the signal test helpers can't currently cope + * with signal frames bigger than struct sigcontext, + * skip VLs that will trigger that. + */ + if (vls[i] > 64) { + printf("Skipping VL %u due to stack size\n", vls[i]); + continue; + } + + if (do_one_sme_vl(td, si, uc, vls[i])) + return 1; + } + + td->pass = 1; + + return 0; +} + +struct tdescr tde = { + .name = "Streaming SVE registers", + .descr = "Check that we get the right Streaming SVE registers reported", + /* + * We shouldn't require FA64 but things like memset() used in the + * helpers might use unsupported instructions so for now disable + * the test unless we've got the full instruction set. 
+ */ + .feats_required = FEAT_SME | FEAT_SME_FA64, + .timeout = 3, + .init = sme_get_vls, + .run = sme_regs, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.c b/tools/testing/selftests/arm64/signal/testcases/testcases.c index 8c2a57fc2f9c..84c36bee4d82 100644 --- a/tools/testing/selftests/arm64/signal/testcases/testcases.c +++ b/tools/testing/selftests/arm64/signal/testcases/testcases.c @@ -75,6 +75,31 @@ bool validate_sve_context(struct sve_context *sve, char **err) return true; } +bool validate_za_context(struct za_context *za, char **err) +{ + /* Size will be rounded up to a multiple of 16 bytes */ + size_t regs_size + = ((ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za->vl)) + 15) / 16) * 16; + + if (!za || !err) + return false; + + /* Either a bare za_context or a za_context followed by regs data */ + if ((za->head.size != sizeof(struct za_context)) && + (za->head.size != regs_size)) { + *err = "bad size for ZA context"; + return false; + } + + if (!sve_vl_valid(za->vl)) { + *err = "SME VL in ZA context invalid"; + + return false; + } + + return true; +} + bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) { bool terminated = false; @@ -82,6 +107,7 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) int flags = 0; struct extra_context *extra = NULL; struct sve_context *sve = NULL; + struct za_context *za = NULL; struct _aarch64_ctx *head = (struct _aarch64_ctx *)uc->uc_mcontext.__reserved; @@ -120,6 +146,13 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) sve = (struct sve_context *)head; flags |= SVE_CTX; break; + case ZA_MAGIC: + if (flags & ZA_CTX) + *err = "Multiple ZA_MAGIC"; + /* Size is validated in validate_za_context() */ + za = (struct za_context *)head; + flags |= ZA_CTX; + break; case EXTRA_MAGIC: if (flags & EXTRA_CTX) *err = "Multiple EXTRA_MAGIC"; @@ -165,6 +198,9 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) if (flags & SVE_CTX) if (!validate_sve_context(sve, err)) return false; + if (flags & ZA_CTX) + if (!validate_za_context(za, err)) + return false; head = GET_RESV_NEXT_HEAD(head); } diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.h b/tools/testing/selftests/arm64/signal/testcases/testcases.h index ad884c135314..49f1d5de7b5b 100644 --- a/tools/testing/selftests/arm64/signal/testcases/testcases.h +++ b/tools/testing/selftests/arm64/signal/testcases/testcases.h @@ -16,7 +16,8 @@ #define FPSIMD_CTX (1 << 0) #define SVE_CTX (1 << 1) -#define EXTRA_CTX (1 << 2) +#define ZA_CTX (1 << 2) +#define EXTRA_CTX (1 << 3) #define KSFT_BAD_MAGIC 0xdeadbeef diff --git a/tools/testing/selftests/arm64/signal/testcases/za_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_regs.c new file mode 100644 index 000000000000..b94e4f99fcac --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/za_regs.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that the ZA register context in signal frames is set up as + * expected. 
+ */ + +#include <signal.h> +#include <ucontext.h> +#include <sys/prctl.h> + +#include "test_signals_utils.h" +#include "testcases.h" + +struct fake_sigframe sf; +static unsigned int vls[SVE_VQ_MAX]; +unsigned int nvls = 0; + +static bool sme_get_vls(struct tdescr *td) +{ + int vq, vl; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(PR_SVE_SET_VL, vq * 16); + if (vl == -1) + return false; + + vl &= PR_SME_VL_LEN_MASK; + + /* Skip missing VLs */ + vq = sve_vq_from_vl(vl); + + vls[nvls++] = vl; + } + + /* We need at least one VL */ + if (nvls < 1) { + fprintf(stderr, "Only %d VL supported\n", nvls); + return false; + } + + return true; +} + +static void setup_za_regs(void) +{ + /* smstart za; real data is TODO */ + asm volatile(".inst 0xd503457f" : : : ); +} + +static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc, + unsigned int vl) +{ + size_t resv_sz, offset; + struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf); + struct za_context *za; + + fprintf(stderr, "Testing VL %d\n", vl); + + if (prctl(PR_SME_SET_VL, vl) != vl) { + fprintf(stderr, "Failed to set VL\n"); + return 1; + } + + /* + * Get a signal context which should have a SVE frame and registers + * in it. + */ + setup_za_regs(); + if (!get_current_context(td, &sf.uc)) + return 1; + + resv_sz = GET_SF_RESV_SIZE(sf); + head = get_header(head, ZA_MAGIC, resv_sz, &offset); + if (!head) { + fprintf(stderr, "No ZA context\n"); + return 1; + } + + za = (struct za_context *)head; + if (za->vl != vl) { + fprintf(stderr, "Got VL %d, expected %d\n", za->vl, vl); + return 1; + } + + /* The actual size validation is done in get_current_context() */ + fprintf(stderr, "Got expected size %u and VL %d\n", + head->size, za->vl); + + return 0; +} + +static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + int i; + + for (i = 0; i < nvls; i++) { + /* + * TODO: the signal test helpers can't currently cope + * with signal frames bigger than struct sigcontext, + * skip VLs that will trigger that. + */ + if (vls[i] > 32) { + printf("Skipping VL %u due to stack size\n", vls[i]); + continue; + } + + if (do_one_sme_vl(td, si, uc, vls[i])) + return 1; + } + + td->pass = 1; + + return 0; +} + +struct tdescr tde = { + .name = "ZA register", + .descr = "Check that we get the right ZA registers reported", + .feats_required = FEAT_SME, + .timeout = 3, + .init = sme_get_vls, + .run = sme_regs, +}; diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c index ca40abe9daa8..da9290817866 100644 --- a/tools/testing/selftests/landlock/base_test.c +++ b/tools/testing/selftests/landlock/base_test.c @@ -18,10 +18,11 @@ #include "common.h" #ifndef O_PATH -#define O_PATH 010000000 +#define O_PATH 010000000 #endif -TEST(inconsistent_attr) { +TEST(inconsistent_attr) +{ const long page_size = sysconf(_SC_PAGESIZE); char *const buf = malloc(page_size + 1); struct landlock_ruleset_attr *const ruleset_attr = (void *)buf; @@ -34,20 +35,26 @@ TEST(inconsistent_attr) { ASSERT_EQ(EINVAL, errno); ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 1, 0)); ASSERT_EQ(EINVAL, errno); + ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 7, 0)); + ASSERT_EQ(EINVAL, errno); ASSERT_EQ(-1, landlock_create_ruleset(NULL, 1, 0)); /* The size if less than sizeof(struct landlock_attr_enforce). 
*/ ASSERT_EQ(EFAULT, errno); - ASSERT_EQ(-1, landlock_create_ruleset(NULL, - sizeof(struct landlock_ruleset_attr), 0)); + ASSERT_EQ(-1, landlock_create_ruleset( + NULL, sizeof(struct landlock_ruleset_attr), 0)); ASSERT_EQ(EFAULT, errno); ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size + 1, 0)); ASSERT_EQ(E2BIG, errno); - ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, - sizeof(struct landlock_ruleset_attr), 0)); + /* Checks minimal valid attribute size. */ + ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 8, 0)); + ASSERT_EQ(ENOMSG, errno); + ASSERT_EQ(-1, landlock_create_ruleset( + ruleset_attr, + sizeof(struct landlock_ruleset_attr), 0)); ASSERT_EQ(ENOMSG, errno); ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size, 0)); ASSERT_EQ(ENOMSG, errno); @@ -63,38 +70,44 @@ TEST(inconsistent_attr) { free(buf); } -TEST(abi_version) { +TEST(abi_version) +{ const struct landlock_ruleset_attr ruleset_attr = { .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE, }; - ASSERT_EQ(1, landlock_create_ruleset(NULL, 0, - LANDLOCK_CREATE_RULESET_VERSION)); + ASSERT_EQ(2, landlock_create_ruleset(NULL, 0, + LANDLOCK_CREATE_RULESET_VERSION)); ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0, - LANDLOCK_CREATE_RULESET_VERSION)); + LANDLOCK_CREATE_RULESET_VERSION)); ASSERT_EQ(EINVAL, errno); ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr), - LANDLOCK_CREATE_RULESET_VERSION)); + LANDLOCK_CREATE_RULESET_VERSION)); ASSERT_EQ(EINVAL, errno); - ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), - LANDLOCK_CREATE_RULESET_VERSION)); + ASSERT_EQ(-1, + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), + LANDLOCK_CREATE_RULESET_VERSION)); ASSERT_EQ(EINVAL, errno); ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0, - LANDLOCK_CREATE_RULESET_VERSION | 1 << 31)); + LANDLOCK_CREATE_RULESET_VERSION | + 1 << 31)); ASSERT_EQ(EINVAL, errno); } -TEST(inval_create_ruleset_flags) { +/* Tests ordering of syscall argument checks. */ +TEST(create_ruleset_checks_ordering) +{ const int last_flag = LANDLOCK_CREATE_RULESET_VERSION; const int invalid_flag = last_flag << 1; + int ruleset_fd; const struct landlock_ruleset_attr ruleset_attr = { .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE, }; + /* Checks priority for invalid flags. */ ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0, invalid_flag)); ASSERT_EQ(EINVAL, errno); @@ -102,44 +115,121 @@ TEST(inval_create_ruleset_flags) { ASSERT_EQ(EINVAL, errno); ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr), - invalid_flag)); + invalid_flag)); + ASSERT_EQ(EINVAL, errno); + + ASSERT_EQ(-1, + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), + invalid_flag)); ASSERT_EQ(EINVAL, errno); - ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), invalid_flag)); + /* Checks too big ruleset_attr size. */ + ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, -1, 0)); + ASSERT_EQ(E2BIG, errno); + + /* Checks too small ruleset_attr size. */ + ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0, 0)); + ASSERT_EQ(EINVAL, errno); + ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 1, 0)); ASSERT_EQ(EINVAL, errno); + + /* Checks valid call. */ + ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); } -TEST(empty_path_beneath_attr) { +/* Tests ordering of syscall argument checks. 
*/ +TEST(add_rule_checks_ordering) +{ const struct landlock_ruleset_attr ruleset_attr = { .handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE, }; - const int ruleset_fd = landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), 0); + struct landlock_path_beneath_attr path_beneath_attr = { + .allowed_access = LANDLOCK_ACCESS_FS_EXECUTE, + .parent_fd = -1, + }; + const int ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); ASSERT_LE(0, ruleset_fd); - /* Similar to struct landlock_path_beneath_attr.parent_fd = 0 */ + /* Checks invalid flags. */ + ASSERT_EQ(-1, landlock_add_rule(-1, 0, NULL, 1)); + ASSERT_EQ(EINVAL, errno); + + /* Checks invalid ruleset FD. */ + ASSERT_EQ(-1, landlock_add_rule(-1, 0, NULL, 0)); + ASSERT_EQ(EBADF, errno); + + /* Checks invalid rule type. */ + ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, 0, NULL, 0)); + ASSERT_EQ(EINVAL, errno); + + /* Checks invalid rule attr. */ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - NULL, 0)); + NULL, 0)); ASSERT_EQ(EFAULT, errno); + + /* Checks invalid path_beneath.parent_fd. */ + ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, + &path_beneath_attr, 0)); + ASSERT_EQ(EBADF, errno); + + /* Checks valid call. */ + path_beneath_attr.parent_fd = + open("/tmp", O_PATH | O_NOFOLLOW | O_DIRECTORY | O_CLOEXEC); + ASSERT_LE(0, path_beneath_attr.parent_fd); + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, + &path_beneath_attr, 0)); + ASSERT_EQ(0, close(path_beneath_attr.parent_fd)); ASSERT_EQ(0, close(ruleset_fd)); } -TEST(inval_fd_enforce) { +/* Tests ordering of syscall argument and permission checks. */ +TEST(restrict_self_checks_ordering) +{ + const struct landlock_ruleset_attr ruleset_attr = { + .handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE, + }; + struct landlock_path_beneath_attr path_beneath_attr = { + .allowed_access = LANDLOCK_ACCESS_FS_EXECUTE, + .parent_fd = -1, + }; + const int ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); + + ASSERT_LE(0, ruleset_fd); + path_beneath_attr.parent_fd = + open("/tmp", O_PATH | O_NOFOLLOW | O_DIRECTORY | O_CLOEXEC); + ASSERT_LE(0, path_beneath_attr.parent_fd); + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, + &path_beneath_attr, 0)); + ASSERT_EQ(0, close(path_beneath_attr.parent_fd)); + + /* Checks unprivileged enforcement without no_new_privs. */ + drop_caps(_metadata); + ASSERT_EQ(-1, landlock_restrict_self(-1, -1)); + ASSERT_EQ(EPERM, errno); + ASSERT_EQ(-1, landlock_restrict_self(-1, 0)); + ASSERT_EQ(EPERM, errno); + ASSERT_EQ(-1, landlock_restrict_self(ruleset_fd, 0)); + ASSERT_EQ(EPERM, errno); + ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)); + /* Checks invalid flags. */ + ASSERT_EQ(-1, landlock_restrict_self(-1, -1)); + ASSERT_EQ(EINVAL, errno); + + /* Checks invalid ruleset FD. */ ASSERT_EQ(-1, landlock_restrict_self(-1, 0)); ASSERT_EQ(EBADF, errno); -} - -TEST(unpriv_enforce_without_no_new_privs) { - int err; - drop_caps(_metadata); - err = landlock_restrict_self(-1, 0); - ASSERT_EQ(EPERM, errno); - ASSERT_EQ(err, -1); + /* Checks valid call. 
*/ + ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0)); + ASSERT_EQ(0, close(ruleset_fd)); } TEST(ruleset_fd_io) @@ -151,8 +241,8 @@ TEST(ruleset_fd_io) char buf; drop_caps(_metadata); - ruleset_fd = landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), 0); + ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); ASSERT_LE(0, ruleset_fd); ASSERT_EQ(-1, write(ruleset_fd, ".", 1)); @@ -197,14 +287,15 @@ TEST(ruleset_fd_transfer) drop_caps(_metadata); /* Creates a test ruleset with a simple rule. */ - ruleset_fd_tx = landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), 0); + ruleset_fd_tx = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); ASSERT_LE(0, ruleset_fd_tx); - path_beneath_attr.parent_fd = open("/tmp", O_PATH | O_NOFOLLOW | - O_DIRECTORY | O_CLOEXEC); + path_beneath_attr.parent_fd = + open("/tmp", O_PATH | O_NOFOLLOW | O_DIRECTORY | O_CLOEXEC); ASSERT_LE(0, path_beneath_attr.parent_fd); - ASSERT_EQ(0, landlock_add_rule(ruleset_fd_tx, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath_attr, 0)); + ASSERT_EQ(0, + landlock_add_rule(ruleset_fd_tx, LANDLOCK_RULE_PATH_BENEATH, + &path_beneath_attr, 0)); ASSERT_EQ(0, close(path_beneath_attr.parent_fd)); cmsg = CMSG_FIRSTHDR(&msg); @@ -215,7 +306,8 @@ TEST(ruleset_fd_transfer) memcpy(CMSG_DATA(cmsg), &ruleset_fd_tx, sizeof(ruleset_fd_tx)); /* Sends the ruleset FD over a socketpair and then close it. */ - ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, socket_fds)); + ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, + socket_fds)); ASSERT_EQ(sizeof(data_tx), sendmsg(socket_fds[0], &msg, 0)); ASSERT_EQ(0, close(socket_fds[0])); ASSERT_EQ(0, close(ruleset_fd_tx)); @@ -226,7 +318,8 @@ TEST(ruleset_fd_transfer) int ruleset_fd_rx; *(char *)msg.msg_iov->iov_base = '\0'; - ASSERT_EQ(sizeof(data_tx), recvmsg(socket_fds[1], &msg, MSG_CMSG_CLOEXEC)); + ASSERT_EQ(sizeof(data_tx), + recvmsg(socket_fds[1], &msg, MSG_CMSG_CLOEXEC)); ASSERT_EQ('.', *(char *)msg.msg_iov->iov_base); ASSERT_EQ(0, close(socket_fds[1])); cmsg = CMSG_FIRSTHDR(&msg); diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h index 183b7e8e1b95..7ba18eb23783 100644 --- a/tools/testing/selftests/landlock/common.h +++ b/tools/testing/selftests/landlock/common.h @@ -25,6 +25,7 @@ * this to be possible, we must not call abort() but instead exit smoothly * (hence the step print). 
*/ +/* clang-format off */ #define TEST_F_FORK(fixture_name, test_name) \ static void fixture_name##_##test_name##_child( \ struct __test_metadata *_metadata, \ @@ -71,11 +72,12 @@ FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \ const FIXTURE_VARIANT(fixture_name) \ __attribute__((unused)) *variant) +/* clang-format on */ #ifndef landlock_create_ruleset -static inline int landlock_create_ruleset( - const struct landlock_ruleset_attr *const attr, - const size_t size, const __u32 flags) +static inline int +landlock_create_ruleset(const struct landlock_ruleset_attr *const attr, + const size_t size, const __u32 flags) { return syscall(__NR_landlock_create_ruleset, attr, size, flags); } @@ -83,17 +85,18 @@ static inline int landlock_create_ruleset( #ifndef landlock_add_rule static inline int landlock_add_rule(const int ruleset_fd, - const enum landlock_rule_type rule_type, - const void *const rule_attr, const __u32 flags) + const enum landlock_rule_type rule_type, + const void *const rule_attr, + const __u32 flags) { - return syscall(__NR_landlock_add_rule, ruleset_fd, rule_type, - rule_attr, flags); + return syscall(__NR_landlock_add_rule, ruleset_fd, rule_type, rule_attr, + flags); } #endif #ifndef landlock_restrict_self static inline int landlock_restrict_self(const int ruleset_fd, - const __u32 flags) + const __u32 flags) { return syscall(__NR_landlock_restrict_self, ruleset_fd, flags); } @@ -111,69 +114,76 @@ static void _init_caps(struct __test_metadata *const _metadata, bool drop_all) }; cap_p = cap_get_proc(); - EXPECT_NE(NULL, cap_p) { + EXPECT_NE(NULL, cap_p) + { TH_LOG("Failed to cap_get_proc: %s", strerror(errno)); } - EXPECT_NE(-1, cap_clear(cap_p)) { + EXPECT_NE(-1, cap_clear(cap_p)) + { TH_LOG("Failed to cap_clear: %s", strerror(errno)); } if (!drop_all) { EXPECT_NE(-1, cap_set_flag(cap_p, CAP_PERMITTED, - ARRAY_SIZE(caps), caps, CAP_SET)) { + ARRAY_SIZE(caps), caps, CAP_SET)) + { TH_LOG("Failed to cap_set_flag: %s", strerror(errno)); } } - EXPECT_NE(-1, cap_set_proc(cap_p)) { + EXPECT_NE(-1, cap_set_proc(cap_p)) + { TH_LOG("Failed to cap_set_proc: %s", strerror(errno)); } - EXPECT_NE(-1, cap_free(cap_p)) { + EXPECT_NE(-1, cap_free(cap_p)) + { TH_LOG("Failed to cap_free: %s", strerror(errno)); } } /* We cannot put such helpers in a library because of kselftest_harness.h . 
*/ -__attribute__((__unused__)) -static void disable_caps(struct __test_metadata *const _metadata) +__attribute__((__unused__)) static void +disable_caps(struct __test_metadata *const _metadata) { _init_caps(_metadata, false); } -__attribute__((__unused__)) -static void drop_caps(struct __test_metadata *const _metadata) +__attribute__((__unused__)) static void +drop_caps(struct __test_metadata *const _metadata) { _init_caps(_metadata, true); } static void _effective_cap(struct __test_metadata *const _metadata, - const cap_value_t caps, const cap_flag_value_t value) + const cap_value_t caps, const cap_flag_value_t value) { cap_t cap_p; cap_p = cap_get_proc(); - EXPECT_NE(NULL, cap_p) { + EXPECT_NE(NULL, cap_p) + { TH_LOG("Failed to cap_get_proc: %s", strerror(errno)); } - EXPECT_NE(-1, cap_set_flag(cap_p, CAP_EFFECTIVE, 1, &caps, value)) { + EXPECT_NE(-1, cap_set_flag(cap_p, CAP_EFFECTIVE, 1, &caps, value)) + { TH_LOG("Failed to cap_set_flag: %s", strerror(errno)); } - EXPECT_NE(-1, cap_set_proc(cap_p)) { + EXPECT_NE(-1, cap_set_proc(cap_p)) + { TH_LOG("Failed to cap_set_proc: %s", strerror(errno)); } - EXPECT_NE(-1, cap_free(cap_p)) { + EXPECT_NE(-1, cap_free(cap_p)) + { TH_LOG("Failed to cap_free: %s", strerror(errno)); } } -__attribute__((__unused__)) -static void set_cap(struct __test_metadata *const _metadata, - const cap_value_t caps) +__attribute__((__unused__)) static void +set_cap(struct __test_metadata *const _metadata, const cap_value_t caps) { _effective_cap(_metadata, caps, CAP_SET); } -__attribute__((__unused__)) -static void clear_cap(struct __test_metadata *const _metadata, - const cap_value_t caps) +__attribute__((__unused__)) static void +clear_cap(struct __test_metadata *const _metadata, const cap_value_t caps) { _effective_cap(_metadata, caps, CAP_CLEAR); } diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c index 10c9a1e4ebd9..21a2ce8fa739 100644 --- a/tools/testing/selftests/landlock/fs_test.c +++ b/tools/testing/selftests/landlock/fs_test.c @@ -22,8 +22,21 @@ #include "common.h" -#define TMP_DIR "tmp" -#define BINARY_PATH "./true" +#ifndef renameat2 +int renameat2(int olddirfd, const char *oldpath, int newdirfd, + const char *newpath, unsigned int flags) +{ + return syscall(__NR_renameat2, olddirfd, oldpath, newdirfd, newpath, + flags); +} +#endif + +#ifndef RENAME_EXCHANGE +#define RENAME_EXCHANGE (1 << 1) +#endif + +#define TMP_DIR "tmp" +#define BINARY_PATH "./true" /* Paths (sibling number and depth) */ static const char dir_s1d1[] = TMP_DIR "/s1d1"; @@ -75,7 +88,7 @@ static const char dir_s3d3[] = TMP_DIR "/s3d1/s3d2/s3d3"; */ static void mkdir_parents(struct __test_metadata *const _metadata, - const char *const path) + const char *const path) { char *walker; const char *parent; @@ -90,9 +103,10 @@ static void mkdir_parents(struct __test_metadata *const _metadata, continue; walker[i] = '\0'; err = mkdir(parent, 0700); - ASSERT_FALSE(err && errno != EEXIST) { - TH_LOG("Failed to create directory \"%s\": %s", - parent, strerror(errno)); + ASSERT_FALSE(err && errno != EEXIST) + { + TH_LOG("Failed to create directory \"%s\": %s", parent, + strerror(errno)); } walker[i] = '/'; } @@ -100,22 +114,24 @@ static void mkdir_parents(struct __test_metadata *const _metadata, } static void create_directory(struct __test_metadata *const _metadata, - const char *const path) + const char *const path) { mkdir_parents(_metadata, path); - ASSERT_EQ(0, mkdir(path, 0700)) { + ASSERT_EQ(0, mkdir(path, 0700)) + { TH_LOG("Failed to create 
directory \"%s\": %s", path, - strerror(errno)); + strerror(errno)); } } static void create_file(struct __test_metadata *const _metadata, - const char *const path) + const char *const path) { mkdir_parents(_metadata, path); - ASSERT_EQ(0, mknod(path, S_IFREG | 0700, 0)) { + ASSERT_EQ(0, mknod(path, S_IFREG | 0700, 0)) + { TH_LOG("Failed to create file \"%s\": %s", path, - strerror(errno)); + strerror(errno)); } } @@ -130,7 +146,7 @@ static int remove_path(const char *const path) goto out; } if (unlink(path) && rmdir(path)) { - if (errno != ENOENT) + if (errno != ENOENT && errno != ENOTDIR) err = errno; goto out; } @@ -221,8 +237,9 @@ static void remove_layout1(struct __test_metadata *const _metadata) EXPECT_EQ(0, remove_path(dir_s3d2)); } -FIXTURE(layout1) { -}; +/* clang-format off */ +FIXTURE(layout1) {}; +/* clang-format on */ FIXTURE_SETUP(layout1) { @@ -242,7 +259,8 @@ FIXTURE_TEARDOWN(layout1) * This helper enables to use the ASSERT_* macros and print the line number * pointing to the test caller. */ -static int test_open_rel(const int dirfd, const char *const path, const int flags) +static int test_open_rel(const int dirfd, const char *const path, + const int flags) { int fd; @@ -291,23 +309,23 @@ TEST_F_FORK(layout1, inval) { struct landlock_path_beneath_attr path_beneath = { .allowed_access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, .parent_fd = -1, }; struct landlock_ruleset_attr ruleset_attr = { .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }; int ruleset_fd; - path_beneath.parent_fd = open(dir_s1d2, O_PATH | O_DIRECTORY | - O_CLOEXEC); + path_beneath.parent_fd = + open(dir_s1d2, O_PATH | O_DIRECTORY | O_CLOEXEC); ASSERT_LE(0, path_beneath.parent_fd); ruleset_fd = open(dir_s1d1, O_PATH | O_DIRECTORY | O_CLOEXEC); ASSERT_LE(0, ruleset_fd); ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0)); + &path_beneath, 0)); /* Returns EBADF because ruleset_fd is not a landlock-ruleset FD. */ ASSERT_EQ(EBADF, errno); ASSERT_EQ(0, close(ruleset_fd)); @@ -315,55 +333,55 @@ TEST_F_FORK(layout1, inval) ruleset_fd = open(dir_s1d1, O_DIRECTORY | O_CLOEXEC); ASSERT_LE(0, ruleset_fd); ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0)); + &path_beneath, 0)); /* Returns EBADFD because ruleset_fd is not a valid ruleset. */ ASSERT_EQ(EBADFD, errno); ASSERT_EQ(0, close(ruleset_fd)); /* Gets a real ruleset. */ - ruleset_fd = landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), 0); + ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); ASSERT_LE(0, ruleset_fd); ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0)); + &path_beneath, 0)); ASSERT_EQ(0, close(path_beneath.parent_fd)); /* Tests without O_PATH. */ path_beneath.parent_fd = open(dir_s1d2, O_DIRECTORY | O_CLOEXEC); ASSERT_LE(0, path_beneath.parent_fd); ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0)); + &path_beneath, 0)); ASSERT_EQ(0, close(path_beneath.parent_fd)); /* Tests with a ruleset FD. */ path_beneath.parent_fd = ruleset_fd; ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0)); + &path_beneath, 0)); ASSERT_EQ(EBADFD, errno); /* Checks unhandled allowed_access. 
*/ - path_beneath.parent_fd = open(dir_s1d2, O_PATH | O_DIRECTORY | - O_CLOEXEC); + path_beneath.parent_fd = + open(dir_s1d2, O_PATH | O_DIRECTORY | O_CLOEXEC); ASSERT_LE(0, path_beneath.parent_fd); /* Test with legitimate values. */ path_beneath.allowed_access |= LANDLOCK_ACCESS_FS_EXECUTE; ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0)); + &path_beneath, 0)); ASSERT_EQ(EINVAL, errno); path_beneath.allowed_access &= ~LANDLOCK_ACCESS_FS_EXECUTE; /* Test with unknown (64-bits) value. */ path_beneath.allowed_access |= (1ULL << 60); ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0)); + &path_beneath, 0)); ASSERT_EQ(EINVAL, errno); path_beneath.allowed_access &= ~(1ULL << 60); /* Test with no access. */ path_beneath.allowed_access = 0; ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0)); + &path_beneath, 0)); ASSERT_EQ(ENOMSG, errno); path_beneath.allowed_access &= ~(1ULL << 60); @@ -376,12 +394,14 @@ TEST_F_FORK(layout1, inval) ASSERT_EQ(0, close(ruleset_fd)); } +/* clang-format off */ + #define ACCESS_FILE ( \ LANDLOCK_ACCESS_FS_EXECUTE | \ LANDLOCK_ACCESS_FS_WRITE_FILE | \ LANDLOCK_ACCESS_FS_READ_FILE) -#define ACCESS_LAST LANDLOCK_ACCESS_FS_MAKE_SYM +#define ACCESS_LAST LANDLOCK_ACCESS_FS_REFER #define ACCESS_ALL ( \ ACCESS_FILE | \ @@ -394,55 +414,90 @@ TEST_F_FORK(layout1, inval) LANDLOCK_ACCESS_FS_MAKE_SOCK | \ LANDLOCK_ACCESS_FS_MAKE_FIFO | \ LANDLOCK_ACCESS_FS_MAKE_BLOCK | \ + LANDLOCK_ACCESS_FS_MAKE_SYM | \ ACCESS_LAST) -TEST_F_FORK(layout1, file_access_rights) +/* clang-format on */ + +TEST_F_FORK(layout1, file_and_dir_access_rights) { __u64 access; int err; - struct landlock_path_beneath_attr path_beneath = {}; + struct landlock_path_beneath_attr path_beneath_file = {}, + path_beneath_dir = {}; struct landlock_ruleset_attr ruleset_attr = { .handled_access_fs = ACCESS_ALL, }; - const int ruleset_fd = landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), 0); + const int ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); ASSERT_LE(0, ruleset_fd); /* Tests access rights for files. */ - path_beneath.parent_fd = open(file1_s1d2, O_PATH | O_CLOEXEC); - ASSERT_LE(0, path_beneath.parent_fd); + path_beneath_file.parent_fd = open(file1_s1d2, O_PATH | O_CLOEXEC); + ASSERT_LE(0, path_beneath_file.parent_fd); + + /* Tests access rights for directories. 
*/ + path_beneath_dir.parent_fd = + open(dir_s1d2, O_PATH | O_DIRECTORY | O_CLOEXEC); + ASSERT_LE(0, path_beneath_dir.parent_fd); + for (access = 1; access <= ACCESS_LAST; access <<= 1) { - path_beneath.allowed_access = access; + path_beneath_dir.allowed_access = access; + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, + LANDLOCK_RULE_PATH_BENEATH, + &path_beneath_dir, 0)); + + path_beneath_file.allowed_access = access; err = landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0); - if ((access | ACCESS_FILE) == ACCESS_FILE) { + &path_beneath_file, 0); + if (access & ACCESS_FILE) { ASSERT_EQ(0, err); } else { ASSERT_EQ(-1, err); ASSERT_EQ(EINVAL, errno); } } - ASSERT_EQ(0, close(path_beneath.parent_fd)); + ASSERT_EQ(0, close(path_beneath_file.parent_fd)); + ASSERT_EQ(0, close(path_beneath_dir.parent_fd)); + ASSERT_EQ(0, close(ruleset_fd)); +} + +TEST_F_FORK(layout1, unknown_access_rights) +{ + __u64 access_mask; + + for (access_mask = 1ULL << 63; access_mask != ACCESS_LAST; + access_mask >>= 1) { + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_fs = access_mask, + }; + + ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, + sizeof(ruleset_attr), 0)); + ASSERT_EQ(EINVAL, errno); + } } static void add_path_beneath(struct __test_metadata *const _metadata, - const int ruleset_fd, const __u64 allowed_access, - const char *const path) + const int ruleset_fd, const __u64 allowed_access, + const char *const path) { struct landlock_path_beneath_attr path_beneath = { .allowed_access = allowed_access, }; path_beneath.parent_fd = open(path, O_PATH | O_CLOEXEC); - ASSERT_LE(0, path_beneath.parent_fd) { + ASSERT_LE(0, path_beneath.parent_fd) + { TH_LOG("Failed to open directory \"%s\": %s", path, - strerror(errno)); + strerror(errno)); } ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0)) { + &path_beneath, 0)) + { TH_LOG("Failed to update the ruleset with \"%s\": %s", path, - strerror(errno)); + strerror(errno)); } ASSERT_EQ(0, close(path_beneath.parent_fd)); } @@ -452,6 +507,8 @@ struct rule { __u64 access; }; +/* clang-format off */ + #define ACCESS_RO ( \ LANDLOCK_ACCESS_FS_READ_FILE | \ LANDLOCK_ACCESS_FS_READ_DIR) @@ -460,39 +517,46 @@ struct rule { ACCESS_RO | \ LANDLOCK_ACCESS_FS_WRITE_FILE) +/* clang-format on */ + static int create_ruleset(struct __test_metadata *const _metadata, - const __u64 handled_access_fs, const struct rule rules[]) + const __u64 handled_access_fs, + const struct rule rules[]) { int ruleset_fd, i; struct landlock_ruleset_attr ruleset_attr = { .handled_access_fs = handled_access_fs, }; - ASSERT_NE(NULL, rules) { + ASSERT_NE(NULL, rules) + { TH_LOG("No rule list"); } - ASSERT_NE(NULL, rules[0].path) { + ASSERT_NE(NULL, rules[0].path) + { TH_LOG("Empty rule list"); } - ruleset_fd = landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), 0); - ASSERT_LE(0, ruleset_fd) { + ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd) + { TH_LOG("Failed to create a ruleset: %s", strerror(errno)); } for (i = 0; rules[i].path; i++) { add_path_beneath(_metadata, ruleset_fd, rules[i].access, - rules[i].path); + rules[i].path); } return ruleset_fd; } static void enforce_ruleset(struct __test_metadata *const _metadata, - const int ruleset_fd) + const int ruleset_fd) { ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)); - ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0)) { + ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0)) + { TH_LOG("Failed to 
enforce ruleset: %s", strerror(errno)); } } @@ -503,13 +567,14 @@ TEST_F_FORK(layout1, proc_nsfs) { .path = "/dev/null", .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, - {} + {}, }; struct landlock_path_beneath_attr path_beneath; - const int ruleset_fd = create_ruleset(_metadata, rules[0].access | - LANDLOCK_ACCESS_FS_READ_DIR, rules); + const int ruleset_fd = create_ruleset( + _metadata, rules[0].access | LANDLOCK_ACCESS_FS_READ_DIR, + rules); ASSERT_LE(0, ruleset_fd); ASSERT_EQ(0, test_open("/proc/self/ns/mnt", O_RDONLY)); @@ -536,22 +601,23 @@ TEST_F_FORK(layout1, proc_nsfs) * references to a ruleset. */ path_beneath.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, path_beneath.parent_fd = open("/proc/self/ns/mnt", O_PATH | O_CLOEXEC); ASSERT_LE(0, path_beneath.parent_fd); ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, - &path_beneath, 0)); + &path_beneath, 0)); ASSERT_EQ(EBADFD, errno); ASSERT_EQ(0, close(path_beneath.parent_fd)); } -TEST_F_FORK(layout1, unpriv) { +TEST_F_FORK(layout1, unpriv) +{ const struct rule rules[] = { { .path = dir_s1d2, .access = ACCESS_RO, }, - {} + {}, }; int ruleset_fd; @@ -577,9 +643,9 @@ TEST_F_FORK(layout1, effective_access) { .path = file1_s2d2, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); char buf; @@ -589,17 +655,23 @@ TEST_F_FORK(layout1, effective_access) enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); - /* Tests on a directory. */ + /* Tests on a directory (with or without O_PATH). */ ASSERT_EQ(EACCES, test_open("/", O_RDONLY)); + ASSERT_EQ(0, test_open("/", O_RDONLY | O_PATH)); ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY)); + ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY | O_PATH)); ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); + ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY | O_PATH)); + ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY)); ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY)); ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY)); ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); - /* Tests on a file. */ + /* Tests on a file (with or without O_PATH). */ ASSERT_EQ(EACCES, test_open(dir_s2d2, O_RDONLY)); + ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY | O_PATH)); + ASSERT_EQ(0, test_open(file1_s2d2, O_RDONLY)); /* Checks effective read and write actions. */ @@ -626,7 +698,7 @@ TEST_F_FORK(layout1, unhandled_access) .path = dir_s1d2, .access = ACCESS_RO, }, - {} + {}, }; /* Here, we only handle read accesses, not write accesses. */ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RO, rules); @@ -653,14 +725,14 @@ TEST_F_FORK(layout1, ruleset_overlap) { .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, { .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_READ_DIR, + LANDLOCK_ACCESS_FS_READ_DIR, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); @@ -687,6 +759,113 @@ TEST_F_FORK(layout1, ruleset_overlap) ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY)); } +TEST_F_FORK(layout1, layer_rule_unions) +{ + const struct rule layer1[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + /* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). 
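Within one layer, the rights granted by rules on a file's ancestor directories are cumulated, which is why dir_s1d3 ends up allowing both rights here even though its own rule only grants WRITE_FILE. A minimal model of that union, with illustrative names, assuming the matching rules' rights have been collected into an array:

#include <linux/types.h>

// One layer: OR together the rights of every rule on the path's ancestors.
static __u64 layer_rule_union(const __u64 *rule_access, unsigned int nr_rules)
{
	__u64 access = 0;
	unsigned int i;

	for (i = 0; i < nr_rules; i++)
		access |= rule_access[i];
	return access;
}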
*/ + { + .path = dir_s1d3, + .access = LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + {}, + }; + const struct rule layer2[] = { + /* Doesn't change anything from layer1. */ + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + {}, + }; + const struct rule layer3[] = { + /* Only allows write (but not read) to dir_s1d3. */ + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + {}, + }; + int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks s1d1 hierarchy with layer1. */ + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + /* Checks s1d2 hierarchy with layer1. */ + ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + /* Checks s1d3 hierarchy with layer1. */ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); + ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY)); + /* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). */ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + /* Doesn't change anything from layer1. */ + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks s1d1 hierarchy with layer2. */ + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + /* Checks s1d2 hierarchy with layer2. */ + ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + /* Checks s1d3 hierarchy with layer2. */ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); + ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY)); + /* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). */ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + /* Only allows write (but not read) to dir_s1d3. */ + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer3); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks s1d1 hierarchy with layer3. */ + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + /* Checks s1d2 hierarchy with layer3. */ + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + /* Checks s1d3 hierarchy with layer3. */ + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDONLY)); + ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY)); + /* dir_s1d3 should now deny READ_FILE and WRITE_FILE (O_RDWR). 
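Across stacked rulesets the composition flips: an access is granted only if every enforced layer grants it, so layer3 (WRITE_FILE only) masks what layer1 and layer2 allowed and the O_RDONLY and O_RDWR opens now fail. A compact model of that intersection, with illustrative names:

#include <stdbool.h>
#include <linux/types.h>

// Layers are AND-ed: a single restrictive layer is enough to deny an access.
static bool allowed_by_all_layers(__u64 requested, const __u64 *layer_grant,
				  unsigned int nr_layers)
{
	unsigned int i;

	for (i = 0; i < nr_layers; i++)
		if (requested & ~layer_grant[i])
			return false;
	return true;
}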
*/ + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDWR)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); +} + TEST_F_FORK(layout1, non_overlapping_accesses) { const struct rule layer1[] = { @@ -694,22 +873,22 @@ TEST_F_FORK(layout1, non_overlapping_accesses) .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_MAKE_REG, }, - {} + {}, }; const struct rule layer2[] = { { .path = dir_s1d3, .access = LANDLOCK_ACCESS_FS_REMOVE_FILE, }, - {} + {}, }; int ruleset_fd; ASSERT_EQ(0, unlink(file1_s1d1)); ASSERT_EQ(0, unlink(file1_s1d2)); - ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG, - layer1); + ruleset_fd = + create_ruleset(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG, layer1); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -720,7 +899,7 @@ TEST_F_FORK(layout1, non_overlapping_accesses) ASSERT_EQ(0, unlink(file1_s1d2)); ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_REMOVE_FILE, - layer2); + layer2); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -758,7 +937,7 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) .path = file1_s1d3, .access = LANDLOCK_ACCESS_FS_READ_FILE, }, - {} + {}, }; /* First rule with write restrictions. */ const struct rule layer2_read_write[] = { @@ -766,14 +945,14 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) { .path = dir_s1d3, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, /* ...but also denies read access via its grandparent directory. */ { .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_WRITE_FILE, }, - {} + {}, }; const struct rule layer3_read[] = { /* Allows read access via its great-grandparent directory. */ @@ -781,7 +960,7 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) .path = dir_s1d1, .access = LANDLOCK_ACCESS_FS_READ_FILE, }, - {} + {}, }; const struct rule layer4_read_write[] = { /* @@ -792,7 +971,7 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_READ_FILE, }, - {} + {}, }; const struct rule layer5_read[] = { /* @@ -803,7 +982,7 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_READ_FILE, }, - {} + {}, }; const struct rule layer6_execute[] = { /* @@ -814,7 +993,7 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) .path = dir_s2d1, .access = LANDLOCK_ACCESS_FS_EXECUTE, }, - {} + {}, }; const struct rule layer7_read_write[] = { /* @@ -825,12 +1004,12 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_WRITE_FILE, }, - {} + {}, }; int ruleset_fd; ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE, - layer1_read); + layer1_read); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -840,8 +1019,10 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY)); ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY)); - ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, layer2_read_write); + ruleset_fd = create_ruleset(_metadata, + LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + layer2_read_write); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -852,7 +1033,7 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY)); ruleset_fd = 
create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE, - layer3_read); + layer3_read); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -863,8 +1044,10 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY)); /* This time, denies write access for the file hierarchy. */ - ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, layer4_read_write); + ruleset_fd = create_ruleset(_metadata, + LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + layer4_read_write); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -879,7 +1062,7 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY)); ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE, - layer5_read); + layer5_read); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -891,7 +1074,7 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY)); ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_EXECUTE, - layer6_execute); + layer6_execute); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -902,8 +1085,10 @@ TEST_F_FORK(layout1, interleaved_masked_accesses) ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY)); ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY)); - ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, layer7_read_write); + ruleset_fd = create_ruleset(_metadata, + LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + layer7_read_write); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -921,9 +1106,9 @@ TEST_F_FORK(layout1, inherit_subset) { .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_READ_DIR, + LANDLOCK_ACCESS_FS_READ_DIR, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); @@ -949,7 +1134,7 @@ TEST_F_FORK(layout1, inherit_subset) * ANDed with the previous ones. */ add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_WRITE_FILE, - dir_s1d2); + dir_s1d2); /* * According to ruleset_fd, dir_s1d2 should now have the * LANDLOCK_ACCESS_FS_READ_FILE and LANDLOCK_ACCESS_FS_WRITE_FILE @@ -1004,7 +1189,7 @@ TEST_F_FORK(layout1, inherit_subset) * that there was no rule tied to it before. */ add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_WRITE_FILE, - dir_s1d3); + dir_s1d3); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -1039,7 +1224,7 @@ TEST_F_FORK(layout1, inherit_superset) .path = dir_s1d3, .access = ACCESS_RO, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); @@ -1054,8 +1239,10 @@ TEST_F_FORK(layout1, inherit_superset) ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); /* Now dir_s1d2, parent of dir_s1d3, gets a new rule tied to it. 
*/ - add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_READ_DIR, dir_s1d2); + add_path_beneath(_metadata, ruleset_fd, + LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_READ_DIR, + dir_s1d2); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); @@ -1075,12 +1262,12 @@ TEST_F_FORK(layout1, max_layers) .path = dir_s1d2, .access = ACCESS_RO, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); ASSERT_LE(0, ruleset_fd); - for (i = 0; i < 64; i++) + for (i = 0; i < 16; i++) enforce_ruleset(_metadata, ruleset_fd); for (i = 0; i < 2; i++) { @@ -1097,15 +1284,15 @@ TEST_F_FORK(layout1, empty_or_same_ruleset) int ruleset_fd; /* Tests empty handled_access_fs. */ - ruleset_fd = landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), 0); + ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); ASSERT_LE(-1, ruleset_fd); ASSERT_EQ(ENOMSG, errno); /* Enforces policy which deny read access to all files. */ ruleset_attr.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE; - ruleset_fd = landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), 0); + ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); @@ -1113,8 +1300,8 @@ TEST_F_FORK(layout1, empty_or_same_ruleset) /* Nests a policy which deny read access to all directories. */ ruleset_attr.handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR; - ruleset_fd = landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), 0); + ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); @@ -1137,7 +1324,7 @@ TEST_F_FORK(layout1, rule_on_mountpoint) .path = dir_s3d2, .access = ACCESS_RO, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); @@ -1166,7 +1353,7 @@ TEST_F_FORK(layout1, rule_over_mountpoint) .path = dir_s3d1, .access = ACCESS_RO, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); @@ -1194,7 +1381,7 @@ TEST_F_FORK(layout1, rule_over_root_allow_then_deny) .path = "/", .access = ACCESS_RO, }, - {} + {}, }; int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); @@ -1224,7 +1411,7 @@ TEST_F_FORK(layout1, rule_over_root_deny) .path = "/", .access = LANDLOCK_ACCESS_FS_READ_FILE, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); @@ -1244,12 +1431,13 @@ TEST_F_FORK(layout1, rule_inside_mount_ns) .path = "s3d3", .access = ACCESS_RO, }, - {} + {}, }; int ruleset_fd; set_cap(_metadata, CAP_SYS_ADMIN); - ASSERT_EQ(0, syscall(SYS_pivot_root, dir_s3d2, dir_s3d3)) { + ASSERT_EQ(0, syscall(__NR_pivot_root, dir_s3d2, dir_s3d3)) + { TH_LOG("Failed to pivot root: %s", strerror(errno)); }; ASSERT_EQ(0, chdir("/")); @@ -1271,7 +1459,7 @@ TEST_F_FORK(layout1, mount_and_pivot) .path = dir_s3d2, .access = ACCESS_RO, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); @@ -1282,7 +1470,7 @@ TEST_F_FORK(layout1, mount_and_pivot) set_cap(_metadata, CAP_SYS_ADMIN); ASSERT_EQ(-1, mount(NULL, dir_s3d2, NULL, MS_RDONLY, NULL)); ASSERT_EQ(EPERM, errno); - ASSERT_EQ(-1, syscall(SYS_pivot_root, dir_s3d2, dir_s3d3)); + ASSERT_EQ(-1, syscall(__NR_pivot_root, dir_s3d2, dir_s3d3)); ASSERT_EQ(EPERM, errno); clear_cap(_metadata, CAP_SYS_ADMIN); } 
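The create_ruleset() and enforce_ruleset() helpers used throughout this file wrap the same basic sequence every Landlock user follows: create a ruleset, add path rules, set no_new_privs, then restrict the task. A self-contained sketch of that sequence, assuming kernel headers that provide <linux/landlock.h> and the __NR_landlock_* syscall numbers; the function name and the chosen access rights are illustrative, and error handling is trimmed:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Grants read-only access beneath @dir; handled writes are denied everywhere. */
static int sandbox_read_only(const char *const dir)
{
	const struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
				     LANDLOCK_ACCESS_FS_READ_DIR |
				     LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_READ_DIR,
	};
	int ruleset_fd, err;

	ruleset_fd = syscall(__NR_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);
	if (ruleset_fd < 0)
		return -1;

	path_beneath.parent_fd = open(dir, O_PATH | O_CLOEXEC);
	if (path_beneath.parent_fd < 0) {
		close(ruleset_fd);
		return -1;
	}
	err = syscall(__NR_landlock_add_rule, ruleset_fd,
		      LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
	close(path_beneath.parent_fd);
	if (!err)
		err = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	if (!err)
		err = syscall(__NR_landlock_restrict_self, ruleset_fd, 0);
	close(ruleset_fd);
	return err ? -1 : 0;
}

After this returns 0, a write-open anywhere fails with EACCES while reads beneath dir keep working, which is the same shape as the assertions in the tests above.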
@@ -1294,28 +1482,29 @@ TEST_F_FORK(layout1, move_mount) .path = dir_s3d2, .access = ACCESS_RO, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); ASSERT_LE(0, ruleset_fd); set_cap(_metadata, CAP_SYS_ADMIN); - ASSERT_EQ(0, syscall(SYS_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD, - dir_s1d2, 0)) { + ASSERT_EQ(0, syscall(__NR_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD, + dir_s1d2, 0)) + { TH_LOG("Failed to move mount: %s", strerror(errno)); } - ASSERT_EQ(0, syscall(SYS_move_mount, AT_FDCWD, dir_s1d2, AT_FDCWD, - dir_s3d2, 0)); + ASSERT_EQ(0, syscall(__NR_move_mount, AT_FDCWD, dir_s1d2, AT_FDCWD, + dir_s3d2, 0)); clear_cap(_metadata, CAP_SYS_ADMIN); enforce_ruleset(_metadata, ruleset_fd); ASSERT_EQ(0, close(ruleset_fd)); set_cap(_metadata, CAP_SYS_ADMIN); - ASSERT_EQ(-1, syscall(SYS_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD, - dir_s1d2, 0)); + ASSERT_EQ(-1, syscall(__NR_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD, + dir_s1d2, 0)); ASSERT_EQ(EPERM, errno); clear_cap(_metadata, CAP_SYS_ADMIN); } @@ -1335,7 +1524,7 @@ TEST_F_FORK(layout1, release_inodes) .path = dir_s3d3, .access = ACCESS_RO, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); @@ -1362,7 +1551,7 @@ enum relative_access { }; static void test_relative_path(struct __test_metadata *const _metadata, - const enum relative_access rel) + const enum relative_access rel) { /* * Common layer to check that chroot doesn't ignore it (i.e. a chroot @@ -1373,7 +1562,7 @@ static void test_relative_path(struct __test_metadata *const _metadata, .path = TMP_DIR, .access = ACCESS_RO, }, - {} + {}, }; const struct rule layer2_subs[] = { { @@ -1384,7 +1573,7 @@ static void test_relative_path(struct __test_metadata *const _metadata, .path = dir_s2d2, .access = ACCESS_RO, }, - {} + {}, }; int dirfd, ruleset_fd; @@ -1425,14 +1614,16 @@ static void test_relative_path(struct __test_metadata *const _metadata, break; case REL_CHROOT_ONLY: /* Do chroot into dir_s1d2 (relative to dir_s2d2). */ - ASSERT_EQ(0, chroot("../../s1d1/s1d2")) { + ASSERT_EQ(0, chroot("../../s1d1/s1d2")) + { TH_LOG("Failed to chroot: %s", strerror(errno)); } dirfd = AT_FDCWD; break; case REL_CHROOT_CHDIR: /* Do chroot into dir_s1d2. */ - ASSERT_EQ(0, chroot(".")) { + ASSERT_EQ(0, chroot(".")) + { TH_LOG("Failed to chroot: %s", strerror(errno)); } dirfd = AT_FDCWD; @@ -1440,7 +1631,7 @@ static void test_relative_path(struct __test_metadata *const _metadata, } ASSERT_EQ((rel == REL_CHROOT_CHDIR) ? 
0 : EACCES, - test_open_rel(dirfd, "..", O_RDONLY)); + test_open_rel(dirfd, "..", O_RDONLY)); ASSERT_EQ(0, test_open_rel(dirfd, ".", O_RDONLY)); if (rel == REL_CHROOT_ONLY) { @@ -1462,11 +1653,13 @@ static void test_relative_path(struct __test_metadata *const _metadata, if (rel != REL_CHROOT_CHDIR) { ASSERT_EQ(EACCES, test_open_rel(dirfd, "../../s1d1", O_RDONLY)); ASSERT_EQ(0, test_open_rel(dirfd, "../../s1d1/s1d2", O_RDONLY)); - ASSERT_EQ(0, test_open_rel(dirfd, "../../s1d1/s1d2/s1d3", O_RDONLY)); + ASSERT_EQ(0, test_open_rel(dirfd, "../../s1d1/s1d2/s1d3", + O_RDONLY)); ASSERT_EQ(EACCES, test_open_rel(dirfd, "../../s2d1", O_RDONLY)); ASSERT_EQ(0, test_open_rel(dirfd, "../../s2d1/s2d2", O_RDONLY)); - ASSERT_EQ(0, test_open_rel(dirfd, "../../s2d1/s2d2/s2d3", O_RDONLY)); + ASSERT_EQ(0, test_open_rel(dirfd, "../../s2d1/s2d2/s2d3", + O_RDONLY)); } if (rel == REL_OPEN) @@ -1495,40 +1688,42 @@ TEST_F_FORK(layout1, relative_chroot_chdir) } static void copy_binary(struct __test_metadata *const _metadata, - const char *const dst_path) + const char *const dst_path) { int dst_fd, src_fd; struct stat statbuf; dst_fd = open(dst_path, O_WRONLY | O_TRUNC | O_CLOEXEC); - ASSERT_LE(0, dst_fd) { - TH_LOG("Failed to open \"%s\": %s", dst_path, - strerror(errno)); + ASSERT_LE(0, dst_fd) + { + TH_LOG("Failed to open \"%s\": %s", dst_path, strerror(errno)); } src_fd = open(BINARY_PATH, O_RDONLY | O_CLOEXEC); - ASSERT_LE(0, src_fd) { + ASSERT_LE(0, src_fd) + { TH_LOG("Failed to open \"" BINARY_PATH "\": %s", - strerror(errno)); + strerror(errno)); } ASSERT_EQ(0, fstat(src_fd, &statbuf)); - ASSERT_EQ(statbuf.st_size, sendfile(dst_fd, src_fd, 0, - statbuf.st_size)); + ASSERT_EQ(statbuf.st_size, + sendfile(dst_fd, src_fd, 0, statbuf.st_size)); ASSERT_EQ(0, close(src_fd)); ASSERT_EQ(0, close(dst_fd)); } -static void test_execute(struct __test_metadata *const _metadata, - const int err, const char *const path) +static void test_execute(struct __test_metadata *const _metadata, const int err, + const char *const path) { int status; - char *const argv[] = {(char *)path, NULL}; + char *const argv[] = { (char *)path, NULL }; const pid_t child = fork(); ASSERT_LE(0, child); if (child == 0) { - ASSERT_EQ(err ? -1 : 0, execve(path, argv, NULL)) { + ASSERT_EQ(err ? -1 : 0, execve(path, argv, NULL)) + { TH_LOG("Failed to execute \"%s\": %s", path, - strerror(errno)); + strerror(errno)); }; ASSERT_EQ(err, errno); _exit(_metadata->passed ? 2 : 1); @@ -1536,9 +1731,10 @@ static void test_execute(struct __test_metadata *const _metadata, } ASSERT_EQ(child, waitpid(child, &status, 0)); ASSERT_EQ(1, WIFEXITED(status)); - ASSERT_EQ(err ? 2 : 0, WEXITSTATUS(status)) { + ASSERT_EQ(err ? 
2 : 0, WEXITSTATUS(status)) + { TH_LOG("Unexpected return code for \"%s\": %s", path, - strerror(errno)); + strerror(errno)); }; } @@ -1549,10 +1745,10 @@ TEST_F_FORK(layout1, execute) .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_EXECUTE, }, - {} + {}, }; - const int ruleset_fd = create_ruleset(_metadata, rules[0].access, - rules); + const int ruleset_fd = + create_ruleset(_metadata, rules[0].access, rules); ASSERT_LE(0, ruleset_fd); copy_binary(_metadata, file1_s1d1); @@ -1577,15 +1773,21 @@ TEST_F_FORK(layout1, execute) TEST_F_FORK(layout1, link) { - const struct rule rules[] = { + const struct rule layer1[] = { { .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_MAKE_REG, }, - {} + {}, }; - const int ruleset_fd = create_ruleset(_metadata, rules[0].access, - rules); + const struct rule layer2[] = { + { + .path = dir_s1d3, + .access = LANDLOCK_ACCESS_FS_REMOVE_FILE, + }, + {}, + }; + int ruleset_fd = create_ruleset(_metadata, layer1[0].access, layer1); ASSERT_LE(0, ruleset_fd); @@ -1598,14 +1800,30 @@ TEST_F_FORK(layout1, link) ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1)); ASSERT_EQ(EACCES, errno); + /* Denies linking because of reparenting. */ ASSERT_EQ(-1, link(file1_s2d1, file1_s1d2)); ASSERT_EQ(EXDEV, errno); ASSERT_EQ(-1, link(file2_s1d2, file1_s1d3)); ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, link(file2_s1d3, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); ASSERT_EQ(0, link(file2_s1d2, file1_s1d2)); ASSERT_EQ(0, link(file2_s1d3, file1_s1d3)); + + /* Prepares for next unlinks. */ + ASSERT_EQ(0, unlink(file2_s1d2)); + ASSERT_EQ(0, unlink(file2_s1d3)); + + ruleset_fd = create_ruleset(_metadata, layer2[0].access, layer2); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks that linkind doesn't require the ability to delete a file. */ + ASSERT_EQ(0, link(file1_s1d2, file2_s1d2)); + ASSERT_EQ(0, link(file1_s1d3, file2_s1d3)); } TEST_F_FORK(layout1, rename_file) @@ -1619,14 +1837,13 @@ TEST_F_FORK(layout1, rename_file) .path = dir_s2d2, .access = LANDLOCK_ACCESS_FS_REMOVE_FILE, }, - {} + {}, }; - const int ruleset_fd = create_ruleset(_metadata, rules[0].access, - rules); + const int ruleset_fd = + create_ruleset(_metadata, rules[0].access, rules); ASSERT_LE(0, ruleset_fd); - ASSERT_EQ(0, unlink(file1_s1d1)); ASSERT_EQ(0, unlink(file1_s1d2)); enforce_ruleset(_metadata, ruleset_fd); @@ -1662,9 +1879,15 @@ TEST_F_FORK(layout1, rename_file) ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d2, AT_FDCWD, file1_s2d1, RENAME_EXCHANGE)); ASSERT_EQ(EACCES, errno); + /* Checks that file1_s2d1 cannot be removed (instead of ENOTDIR). */ + ASSERT_EQ(-1, rename(dir_s2d2, file1_s2d1)); + ASSERT_EQ(EACCES, errno); ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, dir_s2d2, RENAME_EXCHANGE)); ASSERT_EQ(EACCES, errno); + /* Checks that file1_s1d1 cannot be removed (instead of EISDIR). */ + ASSERT_EQ(-1, rename(file1_s1d1, dir_s1d2)); + ASSERT_EQ(EACCES, errno); /* Renames files with different parents. */ ASSERT_EQ(-1, rename(file1_s2d2, file1_s1d2)); @@ -1675,14 +1898,14 @@ TEST_F_FORK(layout1, rename_file) /* Exchanges and renames files with same parent. */ ASSERT_EQ(0, renameat2(AT_FDCWD, file2_s2d3, AT_FDCWD, file1_s2d3, - RENAME_EXCHANGE)); + RENAME_EXCHANGE)); ASSERT_EQ(0, rename(file2_s2d3, file1_s2d3)); /* Exchanges files and directories with same parent, twice. 
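The EXDEV failures above are exactly the cross-directory cases: a rename or link whose source and destination share the same parent directory is never treated as reparenting. One way to express that distinction from userspace, with illustrative naming:

#include <stdbool.h>
#include <sys/stat.h>

// Two parents are the same directory iff device and inode match.
static bool is_reparenting(const struct stat *src_parent,
			   const struct stat *dst_parent)
{
	return src_parent->st_dev != dst_parent->st_dev ||
	       src_parent->st_ino != dst_parent->st_ino;
}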
*/ ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s2d3, - RENAME_EXCHANGE)); + RENAME_EXCHANGE)); ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s2d3, - RENAME_EXCHANGE)); + RENAME_EXCHANGE)); } TEST_F_FORK(layout1, rename_dir) @@ -1696,10 +1919,10 @@ TEST_F_FORK(layout1, rename_dir) .path = dir_s2d1, .access = LANDLOCK_ACCESS_FS_REMOVE_DIR, }, - {} + {}, }; - const int ruleset_fd = create_ruleset(_metadata, rules[0].access, - rules); + const int ruleset_fd = + create_ruleset(_metadata, rules[0].access, rules); ASSERT_LE(0, ruleset_fd); @@ -1727,22 +1950,743 @@ TEST_F_FORK(layout1, rename_dir) ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s1d1, AT_FDCWD, dir_s2d1, RENAME_EXCHANGE)); ASSERT_EQ(EACCES, errno); + /* Checks that dir_s1d2 cannot be removed (instead of ENOTDIR). */ + ASSERT_EQ(-1, rename(dir_s1d2, file1_s1d1)); + ASSERT_EQ(EACCES, errno); ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, dir_s1d2, RENAME_EXCHANGE)); ASSERT_EQ(EACCES, errno); + /* Checks that dir_s1d2 cannot be removed (instead of EISDIR). */ + ASSERT_EQ(-1, rename(file1_s1d1, dir_s1d2)); + ASSERT_EQ(EACCES, errno); /* * Exchanges and renames directory to the same parent, which allows * directory removal. */ ASSERT_EQ(0, renameat2(AT_FDCWD, dir_s1d3, AT_FDCWD, file1_s1d2, - RENAME_EXCHANGE)); + RENAME_EXCHANGE)); ASSERT_EQ(0, unlink(dir_s1d3)); ASSERT_EQ(0, mkdir(dir_s1d3, 0700)); ASSERT_EQ(0, rename(file1_s1d2, dir_s1d3)); ASSERT_EQ(0, rmdir(dir_s1d3)); } +TEST_F_FORK(layout1, reparent_refer) +{ + const struct rule layer1[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_REFER, + }, + { + .path = dir_s2d2, + .access = LANDLOCK_ACCESS_FS_REFER, + }, + {}, + }; + int ruleset_fd = + create_ruleset(_metadata, LANDLOCK_ACCESS_FS_REFER, layer1); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(-1, rename(dir_s1d2, dir_s2d1)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, rename(dir_s1d2, dir_s2d2)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, rename(dir_s1d2, dir_s2d3)); + ASSERT_EQ(EXDEV, errno); + + ASSERT_EQ(-1, rename(dir_s1d3, dir_s2d1)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, rename(dir_s1d3, dir_s2d2)); + ASSERT_EQ(EXDEV, errno); + /* + * Moving should only be allowed when the source and the destination + * parent directory have REFER. + */ + ASSERT_EQ(-1, rename(dir_s1d3, dir_s2d3)); + ASSERT_EQ(ENOTEMPTY, errno); + ASSERT_EQ(0, unlink(file1_s2d3)); + ASSERT_EQ(0, unlink(file2_s2d3)); + ASSERT_EQ(0, rename(dir_s1d3, dir_s2d3)); +} + +TEST_F_FORK(layout1, reparent_link) +{ + const struct rule layer1[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_MAKE_REG, + }, + { + .path = dir_s1d3, + .access = LANDLOCK_ACCESS_FS_REFER, + }, + { + .path = dir_s2d2, + .access = LANDLOCK_ACCESS_FS_REFER, + }, + { + .path = dir_s2d3, + .access = LANDLOCK_ACCESS_FS_MAKE_REG, + }, + {}, + }; + const int ruleset_fd = create_ruleset( + _metadata, + LANDLOCK_ACCESS_FS_MAKE_REG | LANDLOCK_ACCESS_FS_REFER, layer1); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(0, unlink(file1_s1d1)); + ASSERT_EQ(0, unlink(file1_s1d2)); + ASSERT_EQ(0, unlink(file1_s1d3)); + + /* Denies linking because of missing MAKE_REG. */ + ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1)); + ASSERT_EQ(EACCES, errno); + /* Denies linking because of missing source and destination REFER. 
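A compact model of the checks this test walks through for regular-file links (a sketch of the observable behaviour, not the kernel implementation, and it ignores the inherited-rights comparison exercised later in reparent_dom_superset): linking into a different directory needs REFER on both parents plus MAKE_REG on the destination, with a missing MAKE_REG reported as EACCES and a missing REFER as EXDEV.

#include <errno.h>
#include <linux/landlock.h>

// Returns 0 when a cross-directory link would be allowed, otherwise an errno.
static int link_across_check(__u64 src_parent_access, __u64 dst_parent_access)
{
	if (!(dst_parent_access & LANDLOCK_ACCESS_FS_MAKE_REG))
		return EACCES;
	if (!(src_parent_access & LANDLOCK_ACCESS_FS_REFER) ||
	    !(dst_parent_access & LANDLOCK_ACCESS_FS_REFER))
		return EXDEV;
	return 0;
}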
*/ + ASSERT_EQ(-1, link(file1_s2d1, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); + /* Denies linking because of missing source REFER. */ + ASSERT_EQ(-1, link(file1_s2d1, file1_s1d3)); + ASSERT_EQ(EXDEV, errno); + + /* Denies linking because of missing MAKE_REG. */ + ASSERT_EQ(-1, link(file1_s2d2, file1_s1d1)); + ASSERT_EQ(EACCES, errno); + /* Denies linking because of missing destination REFER. */ + ASSERT_EQ(-1, link(file1_s2d2, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); + + /* Allows linking because of REFER and MAKE_REG. */ + ASSERT_EQ(0, link(file1_s2d2, file1_s1d3)); + ASSERT_EQ(0, unlink(file1_s2d2)); + /* Reverse linking denied because of missing MAKE_REG. */ + ASSERT_EQ(-1, link(file1_s1d3, file1_s2d2)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(0, unlink(file1_s2d3)); + /* Checks reverse linking. */ + ASSERT_EQ(0, link(file1_s1d3, file1_s2d3)); + ASSERT_EQ(0, unlink(file1_s1d3)); + + /* + * This is OK for a file link, but it should not be allowed for a + * directory rename (because of the superset of access rights. + */ + ASSERT_EQ(0, link(file1_s2d3, file1_s1d3)); + ASSERT_EQ(0, unlink(file1_s1d3)); + + ASSERT_EQ(-1, link(file2_s1d2, file1_s1d3)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, link(file2_s1d3, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); + + ASSERT_EQ(0, link(file2_s1d2, file1_s1d2)); + ASSERT_EQ(0, link(file2_s1d3, file1_s1d3)); +} + +TEST_F_FORK(layout1, reparent_rename) +{ + /* Same rules as for reparent_link. */ + const struct rule layer1[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_MAKE_REG, + }, + { + .path = dir_s1d3, + .access = LANDLOCK_ACCESS_FS_REFER, + }, + { + .path = dir_s2d2, + .access = LANDLOCK_ACCESS_FS_REFER, + }, + { + .path = dir_s2d3, + .access = LANDLOCK_ACCESS_FS_MAKE_REG, + }, + {}, + }; + const int ruleset_fd = create_ruleset( + _metadata, + LANDLOCK_ACCESS_FS_MAKE_REG | LANDLOCK_ACCESS_FS_REFER, layer1); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(0, unlink(file1_s1d2)); + ASSERT_EQ(0, unlink(file1_s1d3)); + + /* Denies renaming because of missing MAKE_REG. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, file2_s1d1, AT_FDCWD, file1_s1d1, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, file2_s1d1, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(0, unlink(file1_s1d1)); + ASSERT_EQ(-1, rename(file2_s1d1, file1_s1d1)); + ASSERT_EQ(EACCES, errno); + /* Even denies same file exchange. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, file2_s1d1, AT_FDCWD, file2_s1d1, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* Denies renaming because of missing source and destination REFER. */ + ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); + /* + * Denies renaming because of missing MAKE_REG, source and destination + * REFER. + */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, file2_s1d1, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file2_s1d1, AT_FDCWD, file1_s2d1, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* Denies renaming because of missing source REFER. */ + ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3)); + ASSERT_EQ(EXDEV, errno); + /* Denies renaming because of missing MAKE_REG. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, file2_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* Denies renaming because of missing MAKE_REG. 
*/ + ASSERT_EQ(-1, rename(file1_s2d2, file1_s1d1)); + ASSERT_EQ(EACCES, errno); + /* Denies renaming because of missing destination REFER*/ + ASSERT_EQ(-1, rename(file1_s2d2, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); + + /* Denies exchange because of one missing MAKE_REG. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, file2_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + /* Allows renaming because of REFER and MAKE_REG. */ + ASSERT_EQ(0, rename(file1_s2d2, file1_s1d3)); + + /* Reverse renaming denied because of missing MAKE_REG. */ + ASSERT_EQ(-1, rename(file1_s1d3, file1_s2d2)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(0, unlink(file1_s2d3)); + ASSERT_EQ(0, rename(file1_s1d3, file1_s2d3)); + + /* Tests reverse renaming. */ + ASSERT_EQ(0, rename(file1_s2d3, file1_s1d3)); + ASSERT_EQ(0, renameat2(AT_FDCWD, file2_s2d3, AT_FDCWD, file1_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(0, rename(file1_s1d3, file1_s2d3)); + + /* + * This is OK for a file rename, but it should not be allowed for a + * directory rename (because of the superset of access rights). + */ + ASSERT_EQ(0, rename(file1_s2d3, file1_s1d3)); + ASSERT_EQ(0, rename(file1_s1d3, file1_s2d3)); + + /* + * Tests superset restrictions applied to directories. Not only the + * dir_s2d3's parent (dir_s2d2) should be taken into account but also + * access rights tied to dir_s2d3. dir_s2d2 is missing one access right + * compared to dir_s1d3/file1_s1d3 (MAKE_REG) but it is provided + * directly by the moved dir_s2d3. + */ + ASSERT_EQ(0, rename(dir_s2d3, file1_s1d3)); + ASSERT_EQ(0, rename(file1_s1d3, dir_s2d3)); + /* + * The first rename is allowed but not the exchange because dir_s1d3's + * parent (dir_s1d2) doesn't have REFER. + */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, dir_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s1d3, AT_FDCWD, file1_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, rename(file1_s2d3, dir_s1d3)); + ASSERT_EQ(EXDEV, errno); + + ASSERT_EQ(-1, rename(file2_s1d2, file1_s1d3)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, rename(file2_s1d3, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); + + /* Renaming in the same directory is always allowed. */ + ASSERT_EQ(0, rename(file2_s1d2, file1_s1d2)); + ASSERT_EQ(0, rename(file2_s1d3, file1_s1d3)); + + ASSERT_EQ(0, unlink(file1_s1d2)); + /* Denies because of missing source MAKE_REG and destination REFER. */ + ASSERT_EQ(-1, rename(dir_s2d3, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); + + ASSERT_EQ(0, unlink(file1_s1d3)); + /* Denies because of missing source MAKE_REG and REFER. */ + ASSERT_EQ(-1, rename(dir_s2d2, file1_s1d3)); + ASSERT_EQ(EXDEV, errno); +} + +static void +reparent_exdev_layers_enforce1(struct __test_metadata *const _metadata) +{ + const struct rule layer1[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_REFER, + }, + { + /* Interesting for the layer2 tests. 
*/ + .path = dir_s1d3, + .access = LANDLOCK_ACCESS_FS_MAKE_REG, + }, + { + .path = dir_s2d2, + .access = LANDLOCK_ACCESS_FS_REFER, + }, + { + .path = dir_s2d3, + .access = LANDLOCK_ACCESS_FS_MAKE_REG, + }, + {}, + }; + const int ruleset_fd = create_ruleset( + _metadata, + LANDLOCK_ACCESS_FS_MAKE_REG | LANDLOCK_ACCESS_FS_REFER, layer1); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); +} + +static void +reparent_exdev_layers_enforce2(struct __test_metadata *const _metadata) +{ + const struct rule layer2[] = { + { + .path = dir_s2d3, + .access = LANDLOCK_ACCESS_FS_MAKE_DIR, + }, + {}, + }; + /* + * Same checks as before but with a second layer and a new MAKE_DIR + * rule (and no explicit handling of REFER). + */ + const int ruleset_fd = + create_ruleset(_metadata, LANDLOCK_ACCESS_FS_MAKE_DIR, layer2); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); +} + +TEST_F_FORK(layout1, reparent_exdev_layers_rename1) +{ + ASSERT_EQ(0, unlink(file1_s2d2)); + ASSERT_EQ(0, unlink(file1_s2d3)); + + reparent_exdev_layers_enforce1(_metadata); + + /* + * Moving the dir_s1d3 directory below dir_s2d2 is allowed by Landlock + * because it doesn't inherit new access rights. + */ + ASSERT_EQ(0, rename(dir_s1d3, file1_s2d2)); + ASSERT_EQ(0, rename(file1_s2d2, dir_s1d3)); + + /* + * Moving the dir_s1d3 directory below dir_s2d3 is allowed, even if it + * gets a new inherited access rights (MAKE_REG), because MAKE_REG is + * already allowed for dir_s1d3. + */ + ASSERT_EQ(0, rename(dir_s1d3, file1_s2d3)); + ASSERT_EQ(0, rename(file1_s2d3, dir_s1d3)); + + /* + * However, moving the file1_s1d3 file below dir_s2d3 is allowed + * because it cannot inherit MAKE_REG right (which is dedicated to + * directories). + */ + ASSERT_EQ(0, rename(file1_s1d3, file1_s2d3)); + + reparent_exdev_layers_enforce2(_metadata); + + /* + * Moving the dir_s1d3 directory below dir_s2d2 is now denied because + * MAKE_DIR is not tied to dir_s2d2. + */ + ASSERT_EQ(-1, rename(dir_s1d3, file1_s2d2)); + ASSERT_EQ(EACCES, errno); + + /* + * Moving the dir_s1d3 directory below dir_s2d3 is forbidden because it + * would grants MAKE_REG and MAKE_DIR rights to it. + */ + ASSERT_EQ(-1, rename(dir_s1d3, file1_s2d3)); + ASSERT_EQ(EXDEV, errno); + + /* + * However, moving the file2_s1d3 file below dir_s2d3 is allowed + * because it cannot inherit MAKE_REG nor MAKE_DIR rights (which are + * dedicated to directories). + */ + ASSERT_EQ(0, rename(file2_s1d3, file1_s2d3)); +} + +TEST_F_FORK(layout1, reparent_exdev_layers_rename2) +{ + reparent_exdev_layers_enforce1(_metadata); + + /* Checks EACCES predominance over EXDEV. */ + ASSERT_EQ(-1, rename(file1_s1d1, file1_s2d2)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, rename(file1_s1d2, file1_s2d2)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, rename(file1_s1d1, file1_s2d3)); + ASSERT_EQ(EXDEV, errno); + /* Modify layout! */ + ASSERT_EQ(0, rename(file1_s1d2, file1_s2d3)); + + /* Without REFER source. */ + ASSERT_EQ(-1, rename(dir_s1d1, file1_s2d2)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, rename(dir_s1d2, file1_s2d2)); + ASSERT_EQ(EXDEV, errno); + + reparent_exdev_layers_enforce2(_metadata); + + /* Checks EACCES predominance over EXDEV. */ + ASSERT_EQ(-1, rename(file1_s1d1, file1_s2d2)); + ASSERT_EQ(EACCES, errno); + /* Checks with actual file2_s1d2. 
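The assertions around this point pin down the error precedence the comments describe: when a reparenting is denied both because a handled create/remove right is missing and because REFER is missing, EACCES is reported rather than EXDEV. A one-look summary of that rule, with illustrative parameter names:

#include <errno.h>
#include <stdbool.h>

// Errno expected for a denied reparenting, given which checks actually failed.
static int denied_reparent_errno(bool lacks_make_or_remove_right,
				 bool lacks_refer_right)
{
	if (lacks_make_or_remove_right)
		return EACCES;
	return lacks_refer_right ? EXDEV : 0;
}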
*/ + ASSERT_EQ(-1, rename(file2_s1d2, file1_s2d2)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, rename(file1_s1d1, file1_s2d3)); + ASSERT_EQ(EXDEV, errno); + /* Modify layout! */ + ASSERT_EQ(0, rename(file2_s1d2, file1_s2d3)); + + /* Without REFER source, EACCES wins over EXDEV. */ + ASSERT_EQ(-1, rename(dir_s1d1, file1_s2d2)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, rename(dir_s1d2, file1_s2d2)); + ASSERT_EQ(EACCES, errno); +} + +TEST_F_FORK(layout1, reparent_exdev_layers_exchange1) +{ + const char *const dir_file1_s1d2 = file1_s1d2, *const dir_file2_s2d3 = + file2_s2d3; + + ASSERT_EQ(0, unlink(file1_s1d2)); + ASSERT_EQ(0, mkdir(file1_s1d2, 0700)); + ASSERT_EQ(0, unlink(file2_s2d3)); + ASSERT_EQ(0, mkdir(file2_s2d3, 0700)); + + reparent_exdev_layers_enforce1(_metadata); + + /* Error predominance with file exchange: returns EXDEV and EACCES. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, file1_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, file1_s1d1, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* + * Checks with directories which creation could be allowed, but denied + * because of access rights that would be inherited. + */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file1_s1d2, AT_FDCWD, + dir_file2_s2d3, RENAME_EXCHANGE)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD, + dir_file1_s1d2, RENAME_EXCHANGE)); + ASSERT_EQ(EXDEV, errno); + + /* Checks with same access rights. */ + ASSERT_EQ(0, renameat2(AT_FDCWD, dir_s1d3, AT_FDCWD, dir_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(0, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_s1d3, + RENAME_EXCHANGE)); + + /* Checks with different (child-only) access rights. */ + ASSERT_EQ(0, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_file1_s1d2, + RENAME_EXCHANGE)); + ASSERT_EQ(0, renameat2(AT_FDCWD, dir_file1_s1d2, AT_FDCWD, dir_s2d3, + RENAME_EXCHANGE)); + + /* + * Checks that exchange between file and directory are consistent. + * + * Moving a file (file1_s2d2) to a directory which only grants more + * directory-related access rights is allowed, and at the same time + * moving a directory (dir_file2_s2d3) to another directory which + * grants less access rights is allowed too. + * + * See layout1.reparent_exdev_layers_exchange3 for inverted arguments. + */ + ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_file2_s2d3, + RENAME_EXCHANGE)); + /* + * However, moving back the directory is denied because it would get + * more access rights than the current state and because file creation + * is forbidden (in dir_s2d2). + */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD, file1_s2d2, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_file2_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + reparent_exdev_layers_enforce2(_metadata); + + /* Error predominance with file exchange: returns EXDEV and EACCES. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, file1_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, file1_s1d1, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* Checks with directories which creation is now denied. 
*/ + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file1_s1d2, AT_FDCWD, + dir_file2_s2d3, RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD, + dir_file1_s1d2, RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* Checks with different (child-only) access rights. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s1d3, AT_FDCWD, dir_s2d3, + RENAME_EXCHANGE)); + /* Denied because of MAKE_DIR. */ + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* Checks with different (child-only) access rights. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_file1_s1d2, + RENAME_EXCHANGE)); + /* Denied because of MAKE_DIR. */ + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file1_s1d2, AT_FDCWD, dir_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* See layout1.reparent_exdev_layers_exchange2 for complement. */ +} + +TEST_F_FORK(layout1, reparent_exdev_layers_exchange2) +{ + const char *const dir_file2_s2d3 = file2_s2d3; + + ASSERT_EQ(0, unlink(file2_s2d3)); + ASSERT_EQ(0, mkdir(file2_s2d3, 0700)); + + reparent_exdev_layers_enforce1(_metadata); + reparent_exdev_layers_enforce2(_metadata); + + /* Checks that exchange between file and directory are consistent. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_file2_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD, file1_s2d2, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); +} + +TEST_F_FORK(layout1, reparent_exdev_layers_exchange3) +{ + const char *const dir_file2_s2d3 = file2_s2d3; + + ASSERT_EQ(0, unlink(file2_s2d3)); + ASSERT_EQ(0, mkdir(file2_s2d3, 0700)); + + reparent_exdev_layers_enforce1(_metadata); + + /* + * Checks that exchange between file and directory are consistent, + * including with inverted arguments (see + * layout1.reparent_exdev_layers_exchange1). + */ + ASSERT_EQ(0, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD, file1_s2d2, + RENAME_EXCHANGE)); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_file2_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD, file1_s2d2, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); +} + +TEST_F_FORK(layout1, reparent_remove) +{ + const struct rule layer1[] = { + { + .path = dir_s1d1, + .access = LANDLOCK_ACCESS_FS_REFER | + LANDLOCK_ACCESS_FS_REMOVE_DIR, + }, + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_REMOVE_FILE, + }, + { + .path = dir_s2d1, + .access = LANDLOCK_ACCESS_FS_REFER | + LANDLOCK_ACCESS_FS_REMOVE_FILE, + }, + {}, + }; + const int ruleset_fd = create_ruleset( + _metadata, + LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_REMOVE_DIR | + LANDLOCK_ACCESS_FS_REMOVE_FILE, + layer1); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Access denied because of wrong/swapped remove file/dir. */ + ASSERT_EQ(-1, rename(file1_s1d1, dir_s2d2)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, rename(dir_s2d2, file1_s1d1)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, dir_s2d2, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, dir_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* Access allowed thanks to the matching rights. 
*/ + ASSERT_EQ(-1, rename(file1_s2d1, dir_s1d2)); + ASSERT_EQ(EISDIR, errno); + ASSERT_EQ(-1, rename(dir_s1d2, file1_s2d1)); + ASSERT_EQ(ENOTDIR, errno); + ASSERT_EQ(-1, rename(dir_s1d3, file1_s2d1)); + ASSERT_EQ(ENOTDIR, errno); + ASSERT_EQ(0, unlink(file1_s2d1)); + ASSERT_EQ(0, unlink(file1_s1d3)); + ASSERT_EQ(0, unlink(file2_s1d3)); + ASSERT_EQ(0, rename(dir_s1d3, file1_s2d1)); + + /* Effectively removes a file and a directory by exchanging them. */ + ASSERT_EQ(0, mkdir(dir_s1d3, 0700)); + ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); +} + +TEST_F_FORK(layout1, reparent_dom_superset) +{ + const struct rule layer1[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_REFER, + }, + { + .path = file1_s1d2, + .access = LANDLOCK_ACCESS_FS_EXECUTE, + }, + { + .path = dir_s1d3, + .access = LANDLOCK_ACCESS_FS_MAKE_SOCK | + LANDLOCK_ACCESS_FS_EXECUTE, + }, + { + .path = dir_s2d2, + .access = LANDLOCK_ACCESS_FS_REFER | + LANDLOCK_ACCESS_FS_EXECUTE | + LANDLOCK_ACCESS_FS_MAKE_SOCK, + }, + { + .path = dir_s2d3, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_MAKE_FIFO, + }, + {}, + }; + int ruleset_fd = create_ruleset(_metadata, + LANDLOCK_ACCESS_FS_REFER | + LANDLOCK_ACCESS_FS_EXECUTE | + LANDLOCK_ACCESS_FS_MAKE_SOCK | + LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_MAKE_FIFO, + layer1); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(-1, rename(file1_s1d2, file1_s2d1)); + ASSERT_EQ(EXDEV, errno); + /* + * Moving file1_s1d2 beneath dir_s2d3 would grant it the READ_FILE + * access right. + */ + ASSERT_EQ(-1, rename(file1_s1d2, file1_s2d3)); + ASSERT_EQ(EXDEV, errno); + /* + * Moving file1_s1d2 should be allowed even if dir_s2d2 grants a + * superset of access rights compared to dir_s1d2, because file1_s1d2 + * already has these access rights anyway. + */ + ASSERT_EQ(0, rename(file1_s1d2, file1_s2d2)); + ASSERT_EQ(0, rename(file1_s2d2, file1_s1d2)); + + ASSERT_EQ(-1, rename(dir_s1d3, file1_s2d1)); + ASSERT_EQ(EXDEV, errno); + /* + * Moving dir_s1d3 beneath dir_s2d3 would grant it the MAKE_FIFO access + * right. + */ + ASSERT_EQ(-1, rename(dir_s1d3, file1_s2d3)); + ASSERT_EQ(EXDEV, errno); + /* + * Moving dir_s1d3 should be allowed even if dir_s2d2 grants a superset + * of access rights compared to dir_s1d2, because dir_s1d3 already has + * these access rights anyway. + */ + ASSERT_EQ(0, rename(dir_s1d3, file1_s2d2)); + ASSERT_EQ(0, rename(file1_s2d2, dir_s1d3)); + + /* + * Moving file1_s2d3 beneath dir_s1d2 is allowed, but moving it back + * will be denied because the new inherited access rights from dir_s1d2 + * will be less than the destination (original) dir_s2d3. This is a + * sinkhole scenario where we cannot move back files or directories. + */ + ASSERT_EQ(0, rename(file1_s2d3, file2_s1d2)); + ASSERT_EQ(-1, rename(file2_s1d2, file1_s2d3)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(0, unlink(file2_s1d2)); + ASSERT_EQ(0, unlink(file2_s2d3)); + /* + * Checks similar directory one-way move: dir_s2d3 loses EXECUTE and + * MAKE_SOCK which were inherited from dir_s1d3. 
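The rule this test keeps exercising can be stated as: a move is denied with EXDEV whenever the access rights the object would inherit from its new location are not already a subset of the rights it currently has, which is also why such moves are one-way (the sinkhole case above). A minimal expression of that comparison, with illustrative names:

#include <stdbool.h>
#include <linux/types.h>

// True when the destination hierarchy would hand out extra access rights.
static bool move_would_gain_rights(__u64 current_access, __u64 dest_access)
{
	return (dest_access & ~current_access) != 0;
}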
+ */ + ASSERT_EQ(0, rename(dir_s2d3, file2_s1d2)); + ASSERT_EQ(-1, rename(file2_s1d2, dir_s2d3)); + ASSERT_EQ(EXDEV, errno); +} + TEST_F_FORK(layout1, remove_dir) { const struct rule rules[] = { @@ -1750,10 +2694,10 @@ TEST_F_FORK(layout1, remove_dir) .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_REMOVE_DIR, }, - {} + {}, }; - const int ruleset_fd = create_ruleset(_metadata, rules[0].access, - rules); + const int ruleset_fd = + create_ruleset(_metadata, rules[0].access, rules); ASSERT_LE(0, ruleset_fd); @@ -1787,10 +2731,10 @@ TEST_F_FORK(layout1, remove_file) .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_REMOVE_FILE, }, - {} + {}, }; - const int ruleset_fd = create_ruleset(_metadata, rules[0].access, - rules); + const int ruleset_fd = + create_ruleset(_metadata, rules[0].access, rules); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); @@ -1805,14 +2749,15 @@ TEST_F_FORK(layout1, remove_file) } static void test_make_file(struct __test_metadata *const _metadata, - const __u64 access, const mode_t mode, const dev_t dev) + const __u64 access, const mode_t mode, + const dev_t dev) { const struct rule rules[] = { { .path = dir_s1d2, .access = access, }, - {} + {}, }; const int ruleset_fd = create_ruleset(_metadata, access, rules); @@ -1820,9 +2765,10 @@ static void test_make_file(struct __test_metadata *const _metadata, ASSERT_EQ(0, unlink(file1_s1d1)); ASSERT_EQ(0, unlink(file2_s1d1)); - ASSERT_EQ(0, mknod(file2_s1d1, mode | 0400, dev)) { - TH_LOG("Failed to make file \"%s\": %s", - file2_s1d1, strerror(errno)); + ASSERT_EQ(0, mknod(file2_s1d1, mode | 0400, dev)) + { + TH_LOG("Failed to make file \"%s\": %s", file2_s1d1, + strerror(errno)); }; ASSERT_EQ(0, unlink(file1_s1d2)); @@ -1841,9 +2787,10 @@ static void test_make_file(struct __test_metadata *const _metadata, ASSERT_EQ(-1, rename(file2_s1d1, file1_s1d1)); ASSERT_EQ(EACCES, errno); - ASSERT_EQ(0, mknod(file1_s1d2, mode | 0400, dev)) { - TH_LOG("Failed to make file \"%s\": %s", - file1_s1d2, strerror(errno)); + ASSERT_EQ(0, mknod(file1_s1d2, mode | 0400, dev)) + { + TH_LOG("Failed to make file \"%s\": %s", file1_s1d2, + strerror(errno)); }; ASSERT_EQ(0, link(file1_s1d2, file2_s1d2)); ASSERT_EQ(0, unlink(file2_s1d2)); @@ -1860,7 +2807,7 @@ TEST_F_FORK(layout1, make_char) /* Creates a /dev/null device. */ set_cap(_metadata, CAP_MKNOD); test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_CHAR, S_IFCHR, - makedev(1, 3)); + makedev(1, 3)); } TEST_F_FORK(layout1, make_block) @@ -1868,7 +2815,7 @@ TEST_F_FORK(layout1, make_block) /* Creates a /dev/loop0 device. 
*/ set_cap(_metadata, CAP_MKNOD); test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_BLOCK, S_IFBLK, - makedev(7, 0)); + makedev(7, 0)); } TEST_F_FORK(layout1, make_reg_1) @@ -1898,10 +2845,10 @@ TEST_F_FORK(layout1, make_sym) .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_MAKE_SYM, }, - {} + {}, }; - const int ruleset_fd = create_ruleset(_metadata, rules[0].access, - rules); + const int ruleset_fd = + create_ruleset(_metadata, rules[0].access, rules); ASSERT_LE(0, ruleset_fd); @@ -1943,10 +2890,10 @@ TEST_F_FORK(layout1, make_dir) .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_MAKE_DIR, }, - {} + {}, }; - const int ruleset_fd = create_ruleset(_metadata, rules[0].access, - rules); + const int ruleset_fd = + create_ruleset(_metadata, rules[0].access, rules); ASSERT_LE(0, ruleset_fd); @@ -1965,12 +2912,12 @@ TEST_F_FORK(layout1, make_dir) } static int open_proc_fd(struct __test_metadata *const _metadata, const int fd, - const int open_flags) + const int open_flags) { static const char path_template[] = "/proc/self/fd/%d"; char procfd_path[sizeof(path_template) + 10]; - const int procfd_path_size = snprintf(procfd_path, sizeof(procfd_path), - path_template, fd); + const int procfd_path_size = + snprintf(procfd_path, sizeof(procfd_path), path_template, fd); ASSERT_LT(procfd_path_size, sizeof(procfd_path)); return open(procfd_path, open_flags); @@ -1983,12 +2930,13 @@ TEST_F_FORK(layout1, proc_unlinked_file) .path = file1_s1d2, .access = LANDLOCK_ACCESS_FS_READ_FILE, }, - {} + {}, }; int reg_fd, proc_fd; - const int ruleset_fd = create_ruleset(_metadata, - LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, rules); + const int ruleset_fd = create_ruleset( + _metadata, + LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_WRITE_FILE, + rules); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); @@ -2005,9 +2953,10 @@ TEST_F_FORK(layout1, proc_unlinked_file) ASSERT_EQ(0, close(proc_fd)); proc_fd = open_proc_fd(_metadata, reg_fd, O_RDWR | O_CLOEXEC); - ASSERT_EQ(-1, proc_fd) { - TH_LOG("Successfully opened /proc/self/fd/%d: %s", - reg_fd, strerror(errno)); + ASSERT_EQ(-1, proc_fd) + { + TH_LOG("Successfully opened /proc/self/fd/%d: %s", reg_fd, + strerror(errno)); } ASSERT_EQ(EACCES, errno); @@ -2023,13 +2972,13 @@ TEST_F_FORK(layout1, proc_pipe) { .path = dir_s1d2, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, - {} + {}, }; /* Limits read and write access to files tied to the filesystem. */ - const int ruleset_fd = create_ruleset(_metadata, rules[0].access, - rules); + const int ruleset_fd = + create_ruleset(_metadata, rules[0].access, rules); ASSERT_LE(0, ruleset_fd); enforce_ruleset(_metadata, ruleset_fd); @@ -2041,7 +2990,8 @@ TEST_F_FORK(layout1, proc_pipe) /* Checks access to pipes through FD. */ ASSERT_EQ(0, pipe2(pipe_fds, O_CLOEXEC)); - ASSERT_EQ(1, write(pipe_fds[1], ".", 1)) { + ASSERT_EQ(1, write(pipe_fds[1], ".", 1)) + { TH_LOG("Failed to write in pipe: %s", strerror(errno)); } ASSERT_EQ(1, read(pipe_fds[0], &buf, 1)); @@ -2050,9 +3000,10 @@ TEST_F_FORK(layout1, proc_pipe) /* Checks write access to pipe through /proc/self/fd . 
*/ proc_fd = open_proc_fd(_metadata, pipe_fds[1], O_WRONLY | O_CLOEXEC); ASSERT_LE(0, proc_fd); - ASSERT_EQ(1, write(proc_fd, ".", 1)) { + ASSERT_EQ(1, write(proc_fd, ".", 1)) + { TH_LOG("Failed to write through /proc/self/fd/%d: %s", - pipe_fds[1], strerror(errno)); + pipe_fds[1], strerror(errno)); } ASSERT_EQ(0, close(proc_fd)); @@ -2060,9 +3011,10 @@ TEST_F_FORK(layout1, proc_pipe) proc_fd = open_proc_fd(_metadata, pipe_fds[0], O_RDONLY | O_CLOEXEC); ASSERT_LE(0, proc_fd); buf = '\0'; - ASSERT_EQ(1, read(proc_fd, &buf, 1)) { + ASSERT_EQ(1, read(proc_fd, &buf, 1)) + { TH_LOG("Failed to read through /proc/self/fd/%d: %s", - pipe_fds[1], strerror(errno)); + pipe_fds[1], strerror(errno)); } ASSERT_EQ(0, close(proc_fd)); @@ -2070,8 +3022,9 @@ TEST_F_FORK(layout1, proc_pipe) ASSERT_EQ(0, close(pipe_fds[1])); } -FIXTURE(layout1_bind) { -}; +/* clang-format off */ +FIXTURE(layout1_bind) {}; +/* clang-format on */ FIXTURE_SETUP(layout1_bind) { @@ -2161,7 +3114,7 @@ TEST_F_FORK(layout1_bind, same_content_same_file) .path = dir_s2d1, .access = ACCESS_RW, }, - {} + {}, }; /* * Sets access rights on the same bind-mounted directories. The result @@ -2177,7 +3130,7 @@ TEST_F_FORK(layout1_bind, same_content_same_file) .path = dir_s2d2, .access = ACCESS_RW, }, - {} + {}, }; /* Only allow read-access to the s1d3 hierarchies. */ const struct rule layer3_source[] = { @@ -2185,7 +3138,7 @@ TEST_F_FORK(layout1_bind, same_content_same_file) .path = dir_s1d3, .access = LANDLOCK_ACCESS_FS_READ_FILE, }, - {} + {}, }; /* Removes all access rights. */ const struct rule layer4_destination[] = { @@ -2193,7 +3146,7 @@ TEST_F_FORK(layout1_bind, same_content_same_file) .path = bind_file1_s1d3, .access = LANDLOCK_ACCESS_FS_WRITE_FILE, }, - {} + {}, }; int ruleset_fd; @@ -2282,8 +3235,46 @@ TEST_F_FORK(layout1_bind, same_content_same_file) ASSERT_EQ(EACCES, test_open(bind_file1_s1d3, O_WRONLY)); } -#define LOWER_BASE TMP_DIR "/lower" -#define LOWER_DATA LOWER_BASE "/data" +TEST_F_FORK(layout1_bind, reparent_cross_mount) +{ + const struct rule layer1[] = { + { + /* dir_s2d1 is beneath the dir_s2d2 mount point. */ + .path = dir_s2d1, + .access = LANDLOCK_ACCESS_FS_REFER, + }, + { + .path = bind_dir_s1d3, + .access = LANDLOCK_ACCESS_FS_EXECUTE, + }, + {}, + }; + int ruleset_fd = create_ruleset( + _metadata, + LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_EXECUTE, layer1); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks basic denied move. */ + ASSERT_EQ(-1, rename(file1_s1d1, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); + + /* Checks real cross-mount move (Landlock is not involved). */ + ASSERT_EQ(-1, rename(file1_s2d1, file1_s2d2)); + ASSERT_EQ(EXDEV, errno); + + /* Checks move that will give more accesses. */ + ASSERT_EQ(-1, rename(file1_s2d2, bind_file1_s1d3)); + ASSERT_EQ(EXDEV, errno); + + /* Checks legitimate downgrade move. 
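Whether EXDEV comes from a genuine mount boundary (the check above that does not involve Landlock) or from a denied reparenting, callers that already handle EXDEV from rename() typically fall back to copying and unlinking, and that fallback behaves the same in both cases. A rough sketch of such a fallback, with trimmed error handling, no metadata preservation, and illustrative naming:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int move_or_copy(const char *const src, const char *const dst)
{
	char buf[4096];
	ssize_t len;
	int in, out;

	if (!rename(src, dst))
		return 0;
	if (errno != EXDEV)
		return -1;
	in = open(src, O_RDONLY | O_CLOEXEC);
	if (in < 0)
		return -1;
	out = open(dst, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0600);
	if (out < 0) {
		close(in);
		return -1;
	}
	while ((len = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, len) < len)
			break;		// short write: give up in this sketch
	close(in);
	close(out);
	return len ? -1 : unlink(src);
}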
*/ + ASSERT_EQ(0, rename(bind_file1_s1d3, file1_s2d2)); +} + +#define LOWER_BASE TMP_DIR "/lower" +#define LOWER_DATA LOWER_BASE "/data" static const char lower_fl1[] = LOWER_DATA "/fl1"; static const char lower_dl1[] = LOWER_DATA "/dl1"; static const char lower_dl1_fl2[] = LOWER_DATA "/dl1/fl2"; @@ -2295,23 +3286,23 @@ static const char lower_do1_fl3[] = LOWER_DATA "/do1/fl3"; static const char (*lower_base_files[])[] = { &lower_fl1, &lower_fo1, - NULL + NULL, }; static const char (*lower_base_directories[])[] = { &lower_dl1, &lower_do1, - NULL + NULL, }; static const char (*lower_sub_files[])[] = { &lower_dl1_fl2, &lower_do1_fo2, &lower_do1_fl3, - NULL + NULL, }; -#define UPPER_BASE TMP_DIR "/upper" -#define UPPER_DATA UPPER_BASE "/data" -#define UPPER_WORK UPPER_BASE "/work" +#define UPPER_BASE TMP_DIR "/upper" +#define UPPER_DATA UPPER_BASE "/data" +#define UPPER_WORK UPPER_BASE "/work" static const char upper_fu1[] = UPPER_DATA "/fu1"; static const char upper_du1[] = UPPER_DATA "/du1"; static const char upper_du1_fu2[] = UPPER_DATA "/du1/fu2"; @@ -2323,22 +3314,22 @@ static const char upper_do1_fu3[] = UPPER_DATA "/do1/fu3"; static const char (*upper_base_files[])[] = { &upper_fu1, &upper_fo1, - NULL + NULL, }; static const char (*upper_base_directories[])[] = { &upper_du1, &upper_do1, - NULL + NULL, }; static const char (*upper_sub_files[])[] = { &upper_du1_fu2, &upper_do1_fo2, &upper_do1_fu3, - NULL + NULL, }; -#define MERGE_BASE TMP_DIR "/merge" -#define MERGE_DATA MERGE_BASE "/data" +#define MERGE_BASE TMP_DIR "/merge" +#define MERGE_DATA MERGE_BASE "/data" static const char merge_fl1[] = MERGE_DATA "/fl1"; static const char merge_dl1[] = MERGE_DATA "/dl1"; static const char merge_dl1_fl2[] = MERGE_DATA "/dl1/fl2"; @@ -2355,21 +3346,17 @@ static const char (*merge_base_files[])[] = { &merge_fl1, &merge_fu1, &merge_fo1, - NULL + NULL, }; static const char (*merge_base_directories[])[] = { &merge_dl1, &merge_du1, &merge_do1, - NULL + NULL, }; static const char (*merge_sub_files[])[] = { - &merge_dl1_fl2, - &merge_du1_fu2, - &merge_do1_fo2, - &merge_do1_fl3, - &merge_do1_fu3, - NULL + &merge_dl1_fl2, &merge_du1_fu2, &merge_do1_fo2, + &merge_do1_fl3, &merge_do1_fu3, NULL, }; /* @@ -2411,8 +3398,9 @@ static const char (*merge_sub_files[])[] = { * └── work */ -FIXTURE(layout2_overlay) { -}; +/* clang-format off */ +FIXTURE(layout2_overlay) {}; +/* clang-format on */ FIXTURE_SETUP(layout2_overlay) { @@ -2444,9 +3432,8 @@ FIXTURE_SETUP(layout2_overlay) set_cap(_metadata, CAP_SYS_ADMIN); set_cap(_metadata, CAP_DAC_OVERRIDE); ASSERT_EQ(0, mount("overlay", MERGE_DATA, "overlay", 0, - "lowerdir=" LOWER_DATA - ",upperdir=" UPPER_DATA - ",workdir=" UPPER_WORK)); + "lowerdir=" LOWER_DATA ",upperdir=" UPPER_DATA + ",workdir=" UPPER_WORK)); clear_cap(_metadata, CAP_DAC_OVERRIDE); clear_cap(_metadata, CAP_SYS_ADMIN); } @@ -2513,9 +3500,9 @@ TEST_F_FORK(layout2_overlay, no_restriction) ASSERT_EQ(0, test_open(merge_do1_fu3, O_RDONLY)); } -#define for_each_path(path_list, path_entry, i) \ - for (i = 0, path_entry = *path_list[i]; path_list[i]; \ - path_entry = *path_list[++i]) +#define for_each_path(path_list, path_entry, i) \ + for (i = 0, path_entry = *path_list[i]; path_list[i]; \ + path_entry = *path_list[++i]) TEST_F_FORK(layout2_overlay, same_content_different_file) { @@ -2533,7 +3520,7 @@ TEST_F_FORK(layout2_overlay, same_content_different_file) .path = MERGE_BASE, .access = ACCESS_RW, }, - {} + {}, }; const struct rule layer2_data[] = { { @@ -2548,7 +3535,7 @@ TEST_F_FORK(layout2_overlay, 
same_content_different_file) .path = MERGE_DATA, .access = ACCESS_RW, }, - {} + {}, }; /* Sets access right on directories inside both layers. */ const struct rule layer3_subdirs[] = { @@ -2580,7 +3567,7 @@ TEST_F_FORK(layout2_overlay, same_content_different_file) .path = merge_do1, .access = ACCESS_RW, }, - {} + {}, }; /* Tighten access rights to the files. */ const struct rule layer4_files[] = { @@ -2611,37 +3598,37 @@ TEST_F_FORK(layout2_overlay, same_content_different_file) { .path = merge_dl1_fl2, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, { .path = merge_du1_fu2, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, { .path = merge_do1_fo2, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, { .path = merge_do1_fl3, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, { .path = merge_do1_fu3, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, - {} + {}, }; const struct rule layer5_merge_only[] = { { .path = MERGE_DATA, .access = LANDLOCK_ACCESS_FS_READ_FILE | - LANDLOCK_ACCESS_FS_WRITE_FILE, + LANDLOCK_ACCESS_FS_WRITE_FILE, }, - {} + {}, }; int ruleset_fd; size_t i; @@ -2659,7 +3646,8 @@ TEST_F_FORK(layout2_overlay, same_content_different_file) ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY)); } for_each_path(lower_base_directories, path_entry, i) { - ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY)); + ASSERT_EQ(EACCES, + test_open(path_entry, O_RDONLY | O_DIRECTORY)); } for_each_path(lower_sub_files, path_entry, i) { ASSERT_EQ(0, test_open(path_entry, O_RDONLY)); @@ -2671,7 +3659,8 @@ TEST_F_FORK(layout2_overlay, same_content_different_file) ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY)); } for_each_path(upper_base_directories, path_entry, i) { - ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY)); + ASSERT_EQ(EACCES, + test_open(path_entry, O_RDONLY | O_DIRECTORY)); } for_each_path(upper_sub_files, path_entry, i) { ASSERT_EQ(0, test_open(path_entry, O_RDONLY)); @@ -2756,7 +3745,8 @@ TEST_F_FORK(layout2_overlay, same_content_different_file) ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR)); } for_each_path(merge_base_directories, path_entry, i) { - ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY)); + ASSERT_EQ(EACCES, + test_open(path_entry, O_RDONLY | O_DIRECTORY)); } for_each_path(merge_sub_files, path_entry, i) { ASSERT_EQ(0, test_open(path_entry, O_RDWR)); @@ -2781,7 +3771,8 @@ TEST_F_FORK(layout2_overlay, same_content_different_file) ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR)); } for_each_path(merge_base_directories, path_entry, i) { - ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY)); + ASSERT_EQ(EACCES, + test_open(path_entry, O_RDONLY | O_DIRECTORY)); } for_each_path(merge_sub_files, path_entry, i) { ASSERT_EQ(0, test_open(path_entry, O_RDWR)); diff --git a/tools/testing/selftests/landlock/ptrace_test.c b/tools/testing/selftests/landlock/ptrace_test.c index 15fbef9cc849..c28ef98ff3ac 100644 --- a/tools/testing/selftests/landlock/ptrace_test.c +++ b/tools/testing/selftests/landlock/ptrace_test.c @@ -26,9 +26,10 @@ static void create_domain(struct __test_metadata *const _metadata) .handled_access_fs = LANDLOCK_ACCESS_FS_MAKE_BLOCK, }; - ruleset_fd = landlock_create_ruleset(&ruleset_attr, - sizeof(ruleset_attr), 0); - 
EXPECT_LE(0, ruleset_fd) { + ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); + EXPECT_LE(0, ruleset_fd) + { TH_LOG("Failed to create a ruleset: %s", strerror(errno)); } EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)); @@ -43,7 +44,7 @@ static int test_ptrace_read(const pid_t pid) int procenv_path_size, fd; procenv_path_size = snprintf(procenv_path, sizeof(procenv_path), - path_template, pid); + path_template, pid); if (procenv_path_size >= sizeof(procenv_path)) return E2BIG; @@ -59,9 +60,12 @@ static int test_ptrace_read(const pid_t pid) return 0; } -FIXTURE(hierarchy) { }; +/* clang-format off */ +FIXTURE(hierarchy) {}; +/* clang-format on */ -FIXTURE_VARIANT(hierarchy) { +FIXTURE_VARIANT(hierarchy) +{ const bool domain_both; const bool domain_parent; const bool domain_child; @@ -83,7 +87,9 @@ FIXTURE_VARIANT(hierarchy) { * \ P2 -> P1 : allow * 'P2 */ +/* clang-format off */ FIXTURE_VARIANT_ADD(hierarchy, allow_without_domain) { + /* clang-format on */ .domain_both = false, .domain_parent = false, .domain_child = false, @@ -98,7 +104,9 @@ FIXTURE_VARIANT_ADD(hierarchy, allow_without_domain) { * | P2 | * '------' */ +/* clang-format off */ FIXTURE_VARIANT_ADD(hierarchy, allow_with_one_domain) { + /* clang-format on */ .domain_both = false, .domain_parent = false, .domain_child = true, @@ -112,7 +120,9 @@ FIXTURE_VARIANT_ADD(hierarchy, allow_with_one_domain) { * ' * P2 */ +/* clang-format off */ FIXTURE_VARIANT_ADD(hierarchy, deny_with_parent_domain) { + /* clang-format on */ .domain_both = false, .domain_parent = true, .domain_child = false, @@ -127,7 +137,9 @@ FIXTURE_VARIANT_ADD(hierarchy, deny_with_parent_domain) { * | P2 | * '------' */ +/* clang-format off */ FIXTURE_VARIANT_ADD(hierarchy, deny_with_sibling_domain) { + /* clang-format on */ .domain_both = false, .domain_parent = true, .domain_child = true, @@ -142,7 +154,9 @@ FIXTURE_VARIANT_ADD(hierarchy, deny_with_sibling_domain) { * | P2 | * '-------------' */ +/* clang-format off */ FIXTURE_VARIANT_ADD(hierarchy, allow_sibling_domain) { + /* clang-format on */ .domain_both = true, .domain_parent = false, .domain_child = false, @@ -158,7 +172,9 @@ FIXTURE_VARIANT_ADD(hierarchy, allow_sibling_domain) { * | '------' | * '-----------------' */ +/* clang-format off */ FIXTURE_VARIANT_ADD(hierarchy, allow_with_nested_domain) { + /* clang-format on */ .domain_both = true, .domain_parent = false, .domain_child = true, @@ -174,7 +190,9 @@ FIXTURE_VARIANT_ADD(hierarchy, allow_with_nested_domain) { * | P2 | * '-----------------' */ +/* clang-format off */ FIXTURE_VARIANT_ADD(hierarchy, deny_with_nested_and_parent_domain) { + /* clang-format on */ .domain_both = true, .domain_parent = true, .domain_child = false, @@ -192,17 +210,21 @@ FIXTURE_VARIANT_ADD(hierarchy, deny_with_nested_and_parent_domain) { * | '------' | * '-----------------' */ +/* clang-format off */ FIXTURE_VARIANT_ADD(hierarchy, deny_with_forked_domain) { + /* clang-format on */ .domain_both = true, .domain_parent = true, .domain_child = true, }; FIXTURE_SETUP(hierarchy) -{ } +{ +} FIXTURE_TEARDOWN(hierarchy) -{ } +{ +} /* Test PTRACE_TRACEME and PTRACE_ATTACH for parent and child. 
*/ TEST_F(hierarchy, trace) @@ -330,7 +352,7 @@ TEST_F(hierarchy, trace) ASSERT_EQ(1, write(pipe_parent[1], ".", 1)); ASSERT_EQ(child, waitpid(child, &status, 0)); if (WIFSIGNALED(status) || !WIFEXITED(status) || - WEXITSTATUS(status) != EXIT_SUCCESS) + WEXITSTATUS(status) != EXIT_SUCCESS) _metadata->passed = 0; } diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh index c35ba24f994c..66d0414d8e4b 100644 --- a/tools/testing/selftests/rcutorture/bin/functions.sh +++ b/tools/testing/selftests/rcutorture/bin/functions.sh @@ -301,7 +301,7 @@ specify_qemu_cpus () { echo $2 -smp $3 ;; qemu-system-ppc64) - nt="`lscpu | grep '^NUMA node0' | sed -e 's/^[^,]*,\([0-9]*\),.*$/\1/'`" + nt="`lscpu | sed -n 's/^Thread(s) per core:\s*//p'`" echo $2 -smp cores=`expr \( $3 + $nt - 1 \) / $nt`,threads=$nt ;; esac diff --git a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh index 5f682fc892dd..88983cba7956 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh @@ -36,7 +36,7 @@ do then egrep "error:|warning:|^ld: .*undefined reference to" < $i > $i.diags files="$files $i.diags $i" - elif ! test -f ${scenariobasedir}/vmlinux + elif ! test -f ${scenariobasedir}/vmlinux && ! test -f "${rundir}/re-run" then echo No ${scenariobasedir}/vmlinux file > $i.diags files="$files $i.diags $i" diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh index 0a5419982ab3..0789c5606d2a 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh @@ -33,7 +33,12 @@ do TORTURE_SUITE="`cat $i/../torture_suite`" configfile=`echo $i | sed -e 's,^.*/,,'` rm -f $i/console.log.*.diags - kvm-recheck-${TORTURE_SUITE}.sh $i + case "${TORTURE_SUITE}" in + X*) + ;; + *) + kvm-recheck-${TORTURE_SUITE}.sh $i + esac if test -f "$i/qemu-retval" && test "`cat $i/qemu-retval`" -ne 0 && test "`cat $i/qemu-retval`" -ne 137 then echo QEMU error, output: diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh index 8c4c1e4792d0..0ff59bd8b640 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh @@ -138,14 +138,14 @@ chmod +x $T/bin/kvm-remote-*.sh # Check first to avoid the need for cleanup for system-name typos for i in $systems do - ncpus="`ssh $i getconf _NPROCESSORS_ONLN 2> /dev/null`" - echo $i: $ncpus CPUs " " `date` | tee -a "$oldrun/remote-log" + ncpus="`ssh -o BatchMode=yes $i getconf _NPROCESSORS_ONLN 2> /dev/null`" ret=$? if test "$ret" -ne 0 then echo System $i unreachable, giving up. | tee -a "$oldrun/remote-log" exit 4 fi + echo $i: $ncpus CPUs " " `date` | tee -a "$oldrun/remote-log" done # Download and expand the tarball on all systems. @@ -153,14 +153,14 @@ echo Build-products tarball: `du -h $T/binres.tgz` | tee -a "$oldrun/remote-log" for i in $systems do echo Downloading tarball to $i `date` | tee -a "$oldrun/remote-log" - cat $T/binres.tgz | ssh $i "cd /tmp; tar -xzf -" + cat $T/binres.tgz | ssh -o BatchMode=yes $i "cd /tmp; tar -xzf -" ret=$? tries=0 while test "$ret" -ne 0 do echo Unable to download $T/binres.tgz to system $i, waiting and then retrying. $tries prior retries. 
| tee -a "$oldrun/remote-log" sleep 60 - cat $T/binres.tgz | ssh $i "cd /tmp; tar -xzf -" + cat $T/binres.tgz | ssh -o BatchMode=yes $i "cd /tmp; tar -xzf -" ret=$? if test "$ret" -ne 0 then @@ -185,7 +185,7 @@ checkremotefile () { while : do - ssh $1 "test -f \"$2\"" + ssh -o BatchMode=yes $1 "test -f \"$2\"" ret=$? if test "$ret" -eq 255 then @@ -228,7 +228,7 @@ startbatches () { then continue # System still running last test, skip. fi - ssh "$i" "cd \"$resdir/$ds\"; touch remote.run; PATH=\"$T/bin:$PATH\" nohup kvm-remote-$curbatch.sh > kvm-remote-$curbatch.sh.out 2>&1 &" 1>&2 + ssh -o BatchMode=yes "$i" "cd \"$resdir/$ds\"; touch remote.run; PATH=\"$T/bin:$PATH\" nohup kvm-remote-$curbatch.sh > kvm-remote-$curbatch.sh.out 2>&1 &" 1>&2 ret=$? if test "$ret" -ne 0 then @@ -267,7 +267,7 @@ do sleep 30 done echo " ---" Collecting results from $i `date` | tee -a "$oldrun/remote-log" - ( cd "$oldrun"; ssh $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - ) + ( cd "$oldrun"; ssh -o BatchMode=yes $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - ) done ( kvm-end-run-stats.sh "$oldrun" "$starttime"; echo $? > $T/exitcode ) | tee -a "$oldrun/remote-log" diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 55b2c1533282..263e16aeca0e 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -44,6 +44,7 @@ TORTURE_KCONFIG_KASAN_ARG="" TORTURE_KCONFIG_KCSAN_ARG="" TORTURE_KMAKE_ARG="" TORTURE_QEMU_MEM=512 +torture_qemu_mem_default=1 TORTURE_REMOTE= TORTURE_SHUTDOWN_GRACE=180 TORTURE_SUITE=rcu @@ -86,7 +87,7 @@ usage () { echo " --remote" echo " --results absolute-pathname" echo " --shutdown-grace seconds" - echo " --torture lock|rcu|rcuscale|refscale|scf" + echo " --torture lock|rcu|rcuscale|refscale|scf|X*" echo " --trust-make" exit 1 } @@ -180,6 +181,10 @@ do ;; --kasan) TORTURE_KCONFIG_KASAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KASAN=y"; export TORTURE_KCONFIG_KASAN_ARG + if test -n "$torture_qemu_mem_default" + then + TORTURE_QEMU_MEM=2G + fi ;; --kconfig|--kconfigs) checkarg --kconfig "(Kconfig options)" $# "$2" '^CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\)\( CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\)\)*$' '^error$' @@ -202,6 +207,7 @@ do --memory) checkarg --memory "(memory size)" $# "$2" '^[0-9]\+[MG]\?$' error TORTURE_QEMU_MEM=$2 + torture_qemu_mem_default= shift ;; --no-initrd) @@ -231,7 +237,7 @@ do shift ;; --torture) - checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\|rcuscale\|refscale\|scf\)$' '^--' + checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\|rcuscale\|refscale\|scf\|X.*\)$' '^--' TORTURE_SUITE=$2 TORTURE_MOD="`echo $TORTURE_SUITE | sed -e 's/^\(lock\|rcu\|scf\)$/\1torture/'`" shift diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index bfe09e2829c8..d477618e7261 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -54,6 +54,7 @@ do_kvfree=yes do_kasan=yes do_kcsan=no do_clocksourcewd=yes +do_rt=yes # doyesno - Helper function for yes/no arguments function doyesno () { @@ -82,6 +83,7 @@ usage () { echo " --do-rcuscale / --do-no-rcuscale" echo " --do-rcutorture / --do-no-rcutorture" echo " 
--do-refscale / --do-no-refscale" + echo " --do-rt / --do-no-rt" echo " --do-scftorture / --do-no-scftorture" echo " --duration [ <minutes> | <hours>h | <days>d ]" echo " --kcsan-kmake-arg kernel-make-arguments" @@ -118,6 +120,7 @@ do do_scftorture=yes do_rcuscale=yes do_refscale=yes + do_rt=yes do_kvfree=yes do_kasan=yes do_kcsan=yes @@ -148,6 +151,7 @@ do do_scftorture=no do_rcuscale=no do_refscale=no + do_rt=no do_kvfree=no do_kasan=no do_kcsan=no @@ -162,6 +166,9 @@ do --do-refscale|--do-no-refscale) do_refscale=`doyesno "$1" --do-refscale` ;; + --do-rt|--do-no-rt) + do_rt=`doyesno "$1" --do-rt` + ;; --do-scftorture|--do-no-scftorture) do_scftorture=`doyesno "$1" --do-scftorture` ;; @@ -322,6 +329,7 @@ then echo " --- make clean" > "$amcdir/Make.out" 2>&1 make -j$MAKE_ALLOTED_CPUS clean >> "$amcdir/Make.out" 2>&1 echo " --- make allmodconfig" >> "$amcdir/Make.out" 2>&1 + cp .config $amcdir make -j$MAKE_ALLOTED_CPUS allmodconfig >> "$amcdir/Make.out" 2>&1 echo " --- make " >> "$amcdir/Make.out" 2>&1 make -j$MAKE_ALLOTED_CPUS >> "$amcdir/Make.out" 2>&1 @@ -350,8 +358,19 @@ fi if test "$do_scftorture" = "yes" then - torture_bootargs="scftorture.nthreads=$HALF_ALLOTED_CPUS torture.disable_onoff_at_boot" - torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make + torture_bootargs="scftorture.nthreads=$HALF_ALLOTED_CPUS torture.disable_onoff_at_boot csdlock_debug=1" + torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 2G --trust-make +fi + +if test "$do_rt" = "yes" +then + # With all post-boot grace periods forced to normal. + torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcupdate.rcu_normal=1" + torture_set "rcurttorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make + + # With all post-boot grace periods forced to expedited. 
+ torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcupdate.rcu_expedited=1" + torture_set "rcurttorture-exp" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make fi if test "$do_refscale" = yes @@ -363,7 +382,7 @@ fi for prim in $primlist do torture_bootargs="refscale.scale_type="$prim" refscale.nreaders=$HALF_ALLOTED_CPUS refscale.loops=10000 refscale.holdoff=20 torture.disable_onoff_at_boot" - torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --bootargs "verbose_batched=$VERBOSE_BATCH_CPUS torture.verbose_sleep_frequency=8 torture.verbose_sleep_duration=$VERBOSE_BATCH_CPUS" --trust-make + torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_TASKS_TRACE_RCU=y CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --bootargs "verbose_batched=$VERBOSE_BATCH_CPUS torture.verbose_sleep_frequency=8 torture.verbose_sleep_duration=$VERBOSE_BATCH_CPUS" --trust-make done if test "$do_rcuscale" = yes @@ -375,13 +394,13 @@ fi for prim in $primlist do torture_bootargs="rcuscale.scale_type="$prim" rcuscale.nwriters=$HALF_ALLOTED_CPUS rcuscale.holdoff=20 torture.disable_onoff_at_boot" - torture_set "rcuscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make + torture_set "rcuscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_TASKS_TRACE_RCU=y CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make done if test "$do_kvfree" = "yes" then torture_bootargs="rcuscale.kfree_rcu_test=1 rcuscale.kfree_nthreads=16 rcuscale.holdoff=20 rcuscale.kfree_loops=10000 torture.disable_onoff_at_boot" - torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make + torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 2G --trust-make fi if test "$do_clocksourcewd" = "yes" diff --git a/tools/testing/selftests/rcutorture/configs/rcu/RUDE01 b/tools/testing/selftests/rcutorture/configs/rcu/RUDE01 index 7093422050f6..6fd6acb94518 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/RUDE01 +++ b/tools/testing/selftests/rcutorture/configs/rcu/RUDE01 @@ -8,3 +8,5 @@ CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y #CHECK#CONFIG_PROVE_RCU=y CONFIG_RCU_EXPERT=y +CONFIG_FORCE_TASKS_RUDE_RCU=y +#CHECK#CONFIG_TASKS_RUDE_RCU=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N index 2da8b49589a0..07f5e0a70ae7 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N +++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N @@ -6,3 +6,5 @@ CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n #CHECK#CONFIG_RCU_EXPERT=n +CONFIG_KPROBES=n +CONFIG_FTRACE=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 index 3ca112444ce7..d84801b9a7ae 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 +++ 
b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 @@ -7,4 +7,5 @@ CONFIG_PREEMPT=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y #CHECK#CONFIG_PROVE_RCU=y +CONFIG_TASKS_RCU=y CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS02 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS02 index ad2be91e5ee7..2f9fcffff5ae 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS02 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS02 @@ -2,3 +2,7 @@ CONFIG_SMP=n CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n +CONFIG_PREEMPT_DYNAMIC=n +#CHECK#CONFIG_TASKS_RCU=y +CONFIG_FORCE_TASKS_RCU=y +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS02.boot b/tools/testing/selftests/rcutorture/configs/rcu/TASKS02.boot index cd2a188eeb6d..b9b6d67cbc5f 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS02.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS02.boot @@ -1 +1,2 @@ rcutorture.torture_type=tasks +rcutorture.stat_interval=60 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 index dc02083803ce..dea26c568678 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 @@ -7,3 +7,5 @@ CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=n CONFIG_NO_HZ_FULL=y #CHECK#CONFIG_RCU_EXPERT=n +CONFIG_TASKS_RCU=y +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TRACE01 b/tools/testing/selftests/rcutorture/configs/rcu/TRACE01 index e4d74e5fc1d0..85b407467454 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TRACE01 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TRACE01 @@ -4,8 +4,11 @@ CONFIG_HOTPLUG_CPU=y CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n +CONFIG_PREEMPT_DYNAMIC=n CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_PROVE_LOCKING=n #CHECK#CONFIG_PROVE_RCU=n +CONFIG_FORCE_TASKS_TRACE_RCU=y +#CHECK#CONFIG_TASKS_TRACE_RCU=y CONFIG_TASKS_TRACE_RCU_READ_MB=y CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TRACE02 b/tools/testing/selftests/rcutorture/configs/rcu/TRACE02 index 77541eeb4e9f..093ea6e8e65c 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TRACE02 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TRACE02 @@ -7,5 +7,7 @@ CONFIG_PREEMPT=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y #CHECK#CONFIG_PROVE_RCU=y +CONFIG_FORCE_TASKS_TRACE_RCU=y +#CHECK#CONFIG_TASKS_TRACE_RCU=y CONFIG_TASKS_TRACE_RCU_READ_MB=n CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 index 22ad0261728d..ae395981b5e5 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 @@ -1,8 +1,9 @@ CONFIG_SMP=y CONFIG_NR_CPUS=8 -CONFIG_PREEMPT_NONE=y -CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=y CONFIG_PREEMPT=n +CONFIG_PREEMPT_DYNAMIC=n #CHECK#CONFIG_TREE_RCU=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 index 2789b47e4ecd..d30922d8c883 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 @@ -3,6 +3,7 @@ CONFIG_NR_CPUS=16 CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n 
+CONFIG_PREEMPT_DYNAMIC=n #CHECK#CONFIG_TREE_RCU=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE09 b/tools/testing/selftests/rcutorture/configs/rcu/TREE09 index 8523a7515cbf..fc45645bb5f4 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE09 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE09 @@ -13,3 +13,5 @@ CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_RCU_BOOST=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n #CHECK#CONFIG_RCU_EXPERT=n +CONFIG_KPROBES=n +CONFIG_FTRACE=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE10 b/tools/testing/selftests/rcutorture/configs/rcu/TREE10 index 4a00539bfdd7..a323d8948b7c 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE10 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE10 @@ -3,6 +3,7 @@ CONFIG_NR_CPUS=56 CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n +CONFIG_PREEMPT_DYNAMIC=n #CHECK#CONFIG_TREE_RCU=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh index effa415f9b92..e2bc99c785e7 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh +++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh @@ -9,7 +9,7 @@ # rcutorture_param_n_barrier_cbs bootparam-string # -# Adds n_barrier_cbs rcutorture module parameter to kernels having it. +# Adds n_barrier_cbs rcutorture module parameter if not already specified. rcutorture_param_n_barrier_cbs () { if echo $1 | grep -q "rcutorture\.n_barrier_cbs" then @@ -30,13 +30,25 @@ rcutorture_param_onoff () { fi } +# rcutorture_param_stat_interval bootparam-string +# +# Adds stat_interval rcutorture module parameter if not already specified. +rcutorture_param_stat_interval () { + if echo $1 | grep -q "rcutorture\.stat_interval" + then + : + else + echo rcutorture.stat_interval=15 + fi +} + # per_version_boot_params bootparam-string config-file seconds # # Adds per-version torture-module parameters to kernels supporting them. 
per_version_boot_params () { echo $1 `rcutorture_param_onoff "$1" "$2"` \ `rcutorture_param_n_barrier_cbs "$1"` \ - rcutorture.stat_interval=15 \ + `rcutorture_param_stat_interval "$1"` \ rcutorture.shutdown_secs=$3 \ rcutorture.test_no_idle_hz=1 \ rcutorture.verbose=1 diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon b/tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon index 90942bb5bebc..6a00157bee5b 100644 --- a/tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon +++ b/tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon @@ -1,5 +1,6 @@ CONFIG_RCU_SCALE_TEST=y CONFIG_PRINTK_TIME=y -CONFIG_TASKS_RCU_GENERIC=y -CONFIG_TASKS_RCU=y -CONFIG_TASKS_TRACE_RCU=y +CONFIG_FORCE_TASKS_RCU=y +#CHECK#CONFIG_TASKS_RCU=y +CONFIG_FORCE_TASKS_TRACE_RCU=y +#CHECK#CONFIG_TASKS_TRACE_RCU=y diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/TREE b/tools/testing/selftests/rcutorture/configs/rcuscale/TREE index f110d9ffbe4c..b10706fd03a4 100644 --- a/tools/testing/selftests/rcutorture/configs/rcuscale/TREE +++ b/tools/testing/selftests/rcutorture/configs/rcuscale/TREE @@ -16,3 +16,5 @@ CONFIG_RCU_BOOST=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n CONFIG_RCU_EXPERT=y CONFIG_RCU_TRACE=y +CONFIG_KPROBES=n +CONFIG_FTRACE=n diff --git a/tools/testing/selftests/rcutorture/configs/refscale/CFcommon b/tools/testing/selftests/rcutorture/configs/refscale/CFcommon index a98b58b54bb1..fbea3b13baba 100644 --- a/tools/testing/selftests/rcutorture/configs/refscale/CFcommon +++ b/tools/testing/selftests/rcutorture/configs/refscale/CFcommon @@ -1,2 +1,6 @@ CONFIG_RCU_REF_SCALE_TEST=y CONFIG_PRINTK_TIME=y +CONFIG_FORCE_TASKS_RCU=y +#CHECK#CONFIG_TASKS_RCU=y +CONFIG_FORCE_TASKS_TRACE_RCU=y +#CHECK#CONFIG_TASKS_TRACE_RCU=y diff --git a/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT b/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT index 7f06838a91e6..ef2b501a6971 100644 --- a/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT +++ b/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT @@ -15,3 +15,5 @@ CONFIG_PROVE_LOCKING=n CONFIG_RCU_BOOST=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n CONFIG_RCU_EXPERT=y +CONFIG_KPROBES=n +CONFIG_FTRACE=n diff --git a/tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT b/tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT index b8429d6c6ebc..3a59346b3de7 100644 --- a/tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT +++ b/tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT @@ -7,3 +7,5 @@ CONFIG_NO_HZ_IDLE=n CONFIG_NO_HZ_FULL=y CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_PROVE_LOCKING=n +CONFIG_KPROBES=n +CONFIG_FTRACE=n diff --git a/tools/testing/selftests/rcutorture/configs/scf/PREEMPT b/tools/testing/selftests/rcutorture/configs/scf/PREEMPT index ae4992b141b0..cb37e08037d6 100644 --- a/tools/testing/selftests/rcutorture/configs/scf/PREEMPT +++ b/tools/testing/selftests/rcutorture/configs/scf/PREEMPT @@ -7,3 +7,4 @@ CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh index d3d9e35d3d55..2d949e58f5a5 100644 --- a/tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh +++ b/tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh @@ -25,6 +25,5 @@ per_version_boot_params () { echo $1 `scftorture_param_onoff "$1" "$2"` \ scftorture.stat_interval=15 \ scftorture.shutdown_secs=$3 \ - 
scftorture.verbose=1 \ - scf + scftorture.verbose=1 } diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile index 585f7a0c10cb..f017c382c036 100644 --- a/tools/testing/selftests/seccomp/Makefile +++ b/tools/testing/selftests/seccomp/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS += -Wl,-no-as-needed -Wall -isystem ../../../../usr/include/ LDFLAGS += -lpthread +LDLIBS += -lcap TEST_GEN_PROGS := seccomp_bpf seccomp_benchmark include ../lib.mk diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 313bb0cbfb1e..136df5b76319 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -46,6 +46,7 @@ #include <sys/ioctl.h> #include <linux/kcmp.h> #include <sys/resource.h> +#include <sys/capability.h> #include <unistd.h> #include <sys/syscall.h> @@ -59,6 +60,8 @@ #define SKIP(s, ...) XFAIL(s, ##__VA_ARGS__) #endif +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) + #ifndef PR_SET_PTRACER # define PR_SET_PTRACER 0x59616d61 #endif @@ -268,6 +271,10 @@ struct seccomp_notif_addfd_big { #define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4) #endif +#ifndef SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV +#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5) +#endif + #ifndef seccomp int seccomp(unsigned int op, unsigned int flags, void *args) { @@ -3742,7 +3749,10 @@ TEST(user_notification_fault_recv) struct seccomp_notif req = {}; struct seccomp_notif_resp resp = {}; - ASSERT_EQ(unshare(CLONE_NEWUSER), 0); + ASSERT_EQ(unshare(CLONE_NEWUSER), 0) { + if (errno == EINVAL) + SKIP(return, "kernel missing CLONE_NEWUSER support"); + } listener = user_notif_syscall(__NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER); @@ -4231,6 +4241,421 @@ TEST(user_notification_addfd_rlimit) close(memfd); } +/* Make sure PTRACE_O_SUSPEND_SECCOMP requires CAP_SYS_ADMIN. */ +FIXTURE(O_SUSPEND_SECCOMP) { + pid_t pid; +}; + +FIXTURE_SETUP(O_SUSPEND_SECCOMP) +{ + ERRNO_FILTER(block_read, E2BIG); + cap_value_t cap_list[] = { CAP_SYS_ADMIN }; + cap_t caps; + + self->pid = 0; + + /* make sure we don't have CAP_SYS_ADMIN */ + caps = cap_get_proc(); + ASSERT_NE(NULL, caps); + ASSERT_EQ(0, cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR)); + ASSERT_EQ(0, cap_set_proc(caps)); + cap_free(caps); + + ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)); + ASSERT_EQ(0, prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_block_read)); + + self->pid = fork(); + ASSERT_GE(self->pid, 0); + + if (self->pid == 0) { + while (1) + pause(); + _exit(127); + } +} + +FIXTURE_TEARDOWN(O_SUSPEND_SECCOMP) +{ + if (self->pid) + kill(self->pid, SIGKILL); +} + +TEST_F(O_SUSPEND_SECCOMP, setoptions) +{ + int wstatus; + + ASSERT_EQ(0, ptrace(PTRACE_ATTACH, self->pid, NULL, 0)); + ASSERT_EQ(self->pid, wait(&wstatus)); + ASSERT_EQ(-1, ptrace(PTRACE_SETOPTIONS, self->pid, NULL, PTRACE_O_SUSPEND_SECCOMP)); + if (errno == EINVAL) + SKIP(return, "Kernel does not support PTRACE_O_SUSPEND_SECCOMP (missing CONFIG_CHECKPOINT_RESTORE?)"); + ASSERT_EQ(EPERM, errno); +} + +TEST_F(O_SUSPEND_SECCOMP, seize) +{ + int ret; + + ret = ptrace(PTRACE_SEIZE, self->pid, NULL, PTRACE_O_SUSPEND_SECCOMP); + ASSERT_EQ(-1, ret); + if (errno == EINVAL) + SKIP(return, "Kernel does not support PTRACE_O_SUSPEND_SECCOMP (missing CONFIG_CHECKPOINT_RESTORE?)"); + ASSERT_EQ(EPERM, errno); +} + +/* + * get_nth - Get the nth, space separated entry in a file. + * + * Returns the length of the read field. 
+ * Throws error if field is zero-lengthed. + */ +static ssize_t get_nth(struct __test_metadata *_metadata, const char *path, + const unsigned int position, char **entry) +{ + char *line = NULL; + unsigned int i; + ssize_t nread; + size_t len = 0; + FILE *f; + + f = fopen(path, "r"); + ASSERT_NE(f, NULL) { + TH_LOG("Could not open %s: %s", path, strerror(errno)); + } + + for (i = 0; i < position; i++) { + nread = getdelim(&line, &len, ' ', f); + ASSERT_GE(nread, 0) { + TH_LOG("Failed to read %d entry in file %s", i, path); + } + } + fclose(f); + + ASSERT_GT(nread, 0) { + TH_LOG("Entry in file %s had zero length", path); + } + + *entry = line; + return nread - 1; +} + +/* For a given PID, get the task state (D, R, etc...) */ +static char get_proc_stat(struct __test_metadata *_metadata, pid_t pid) +{ + char proc_path[100] = {0}; + char status; + char *line; + + snprintf(proc_path, sizeof(proc_path), "/proc/%d/stat", pid); + ASSERT_EQ(get_nth(_metadata, proc_path, 3, &line), 1); + + status = *line; + free(line); + + return status; +} + +TEST(user_notification_fifo) +{ + struct seccomp_notif_resp resp = {}; + struct seccomp_notif req = {}; + int i, status, listener; + pid_t pid, pids[3]; + __u64 baseid; + long ret; + /* 100 ms */ + struct timespec delay = { .tv_nsec = 100000000 }; + + ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); + ASSERT_EQ(0, ret) { + TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); + } + + /* Setup a listener */ + listener = user_notif_syscall(__NR_getppid, + SECCOMP_FILTER_FLAG_NEW_LISTENER); + ASSERT_GE(listener, 0); + + pid = fork(); + ASSERT_GE(pid, 0); + + if (pid == 0) { + ret = syscall(__NR_getppid); + exit(ret != USER_NOTIF_MAGIC); + } + + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); + baseid = req.id + 1; + + resp.id = req.id; + resp.error = 0; + resp.val = USER_NOTIF_MAGIC; + + /* check that we make sure flags == 0 */ + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); + + EXPECT_EQ(waitpid(pid, &status, 0), pid); + EXPECT_EQ(true, WIFEXITED(status)); + EXPECT_EQ(0, WEXITSTATUS(status)); + + /* Start children, and generate notifications */ + for (i = 0; i < ARRAY_SIZE(pids); i++) { + pid = fork(); + if (pid == 0) { + ret = syscall(__NR_getppid); + exit(ret != USER_NOTIF_MAGIC); + } + pids[i] = pid; + } + + /* This spins until all of the children are sleeping */ +restart_wait: + for (i = 0; i < ARRAY_SIZE(pids); i++) { + if (get_proc_stat(_metadata, pids[i]) != 'S') { + nanosleep(&delay, NULL); + goto restart_wait; + } + } + + /* Read the notifications in order (and respond) */ + for (i = 0; i < ARRAY_SIZE(pids); i++) { + memset(&req, 0, sizeof(req)); + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); + EXPECT_EQ(req.id, baseid + i); + resp.id = req.id; + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); + } + + /* Make sure notifications were received */ + for (i = 0; i < ARRAY_SIZE(pids); i++) { + EXPECT_EQ(waitpid(pids[i], &status, 0), pids[i]); + EXPECT_EQ(true, WIFEXITED(status)); + EXPECT_EQ(0, WEXITSTATUS(status)); + } +} + +/* get_proc_syscall - Get the syscall in progress for a given pid + * + * Returns the current syscall number for a given process + * Returns -1 if not in syscall (running or blocked) + */ +static long get_proc_syscall(struct __test_metadata *_metadata, int pid) +{ + char proc_path[100] = {0}; + long ret = -1; + ssize_t nread; + char *line; + + snprintf(proc_path, sizeof(proc_path), "/proc/%d/syscall", pid); + nread = get_nth(_metadata, proc_path, 1, &line); + 
ASSERT_GT(nread, 0); + + if (!strncmp("running", line, MIN(7, nread))) + ret = strtol(line, NULL, 16); + + free(line); + return ret; +} + +/* Ensure non-fatal signals prior to receive are unmodified */ +TEST(user_notification_wait_killable_pre_notification) +{ + struct sigaction new_action = { + .sa_handler = signal_handler, + }; + int listener, status, sk_pair[2]; + pid_t pid; + long ret; + char c; + /* 100 ms */ + struct timespec delay = { .tv_nsec = 100000000 }; + + ASSERT_EQ(sigemptyset(&new_action.sa_mask), 0); + + ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); + ASSERT_EQ(0, ret) + { + TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); + } + + ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0); + + listener = user_notif_syscall( + __NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER | + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV); + ASSERT_GE(listener, 0); + + /* + * Check that we can kill the process with SIGUSR1 prior to receiving + * the notification. SIGUSR1 is wired up to a custom signal handler, + * and make sure it gets called. + */ + pid = fork(); + ASSERT_GE(pid, 0); + + if (pid == 0) { + close(sk_pair[0]); + handled = sk_pair[1]; + + /* Setup the non-fatal sigaction without SA_RESTART */ + if (sigaction(SIGUSR1, &new_action, NULL)) { + perror("sigaction"); + exit(1); + } + + ret = syscall(__NR_getppid); + /* Make sure we got a return from a signal interruption */ + exit(ret != -1 || errno != EINTR); + } + + /* + * Make sure we've gotten to the seccomp user notification wait + * from getppid prior to sending any signals + */ + while (get_proc_syscall(_metadata, pid) != __NR_getppid && + get_proc_stat(_metadata, pid) != 'S') + nanosleep(&delay, NULL); + + /* Send non-fatal kill signal */ + EXPECT_EQ(kill(pid, SIGUSR1), 0); + + /* wait for process to exit (exit checks for EINTR) */ + EXPECT_EQ(waitpid(pid, &status, 0), pid); + EXPECT_EQ(true, WIFEXITED(status)); + EXPECT_EQ(0, WEXITSTATUS(status)); + + EXPECT_EQ(read(sk_pair[0], &c, 1), 1); +} + +/* Ensure non-fatal signals after receive are blocked */ +TEST(user_notification_wait_killable) +{ + struct sigaction new_action = { + .sa_handler = signal_handler, + }; + struct seccomp_notif_resp resp = {}; + struct seccomp_notif req = {}; + int listener, status, sk_pair[2]; + pid_t pid; + long ret; + char c; + /* 100 ms */ + struct timespec delay = { .tv_nsec = 100000000 }; + + ASSERT_EQ(sigemptyset(&new_action.sa_mask), 0); + + ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); + ASSERT_EQ(0, ret) + { + TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); + } + + ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0); + + listener = user_notif_syscall( + __NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER | + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV); + ASSERT_GE(listener, 0); + + pid = fork(); + ASSERT_GE(pid, 0); + + if (pid == 0) { + close(sk_pair[0]); + handled = sk_pair[1]; + + /* Setup the sigaction without SA_RESTART */ + if (sigaction(SIGUSR1, &new_action, NULL)) { + perror("sigaction"); + exit(1); + } + + /* Make sure that the syscall is completed (no EINTR) */ + ret = syscall(__NR_getppid); + exit(ret != USER_NOTIF_MAGIC); + } + + /* + * Get the notification, to make move the notifying process into a + * non-preemptible (TASK_KILLABLE) state. + */ + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); + /* Send non-fatal kill signal */ + EXPECT_EQ(kill(pid, SIGUSR1), 0); + + /* + * Make sure the task enters moves to TASK_KILLABLE by waiting for + * D (Disk Sleep) state after receiving non-fatal signal. 
+ */ + while (get_proc_stat(_metadata, pid) != 'D') + nanosleep(&delay, NULL); + + resp.id = req.id; + resp.val = USER_NOTIF_MAGIC; + /* Make sure the notification is found and able to be replied to */ + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); + + /* + * Make sure that the signal handler does get called once we're back in + * userspace. + */ + EXPECT_EQ(read(sk_pair[0], &c, 1), 1); + /* wait for process to exit (exit checks for USER_NOTIF_MAGIC) */ + EXPECT_EQ(waitpid(pid, &status, 0), pid); + EXPECT_EQ(true, WIFEXITED(status)); + EXPECT_EQ(0, WEXITSTATUS(status)); +} + +/* Ensure fatal signals after receive are not blocked */ +TEST(user_notification_wait_killable_fatal) +{ + struct seccomp_notif req = {}; + int listener, status; + pid_t pid; + long ret; + /* 100 ms */ + struct timespec delay = { .tv_nsec = 100000000 }; + + ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); + ASSERT_EQ(0, ret) + { + TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); + } + + listener = user_notif_syscall( + __NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER | + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV); + ASSERT_GE(listener, 0); + + pid = fork(); + ASSERT_GE(pid, 0); + + if (pid == 0) { + /* This should never complete as it should get a SIGTERM */ + syscall(__NR_getppid); + exit(1); + } + + while (get_proc_stat(_metadata, pid) != 'S') + nanosleep(&delay, NULL); + + /* + * Get the notification, to make move the notifying process into a + * non-preemptible (TASK_KILLABLE) state. + */ + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); + /* Kill the process with a fatal signal */ + EXPECT_EQ(kill(pid, SIGTERM), 0); + + /* + * Wait for the process to exit, and make sure the process terminated + * due to the SIGTERM signal. + */ + EXPECT_EQ(waitpid(pid, &status, 0), pid); + EXPECT_EQ(true, WIFSIGNALED(status)); + EXPECT_EQ(SIGTERM, WTERMSIG(status)); +} + /* * TODO: * - expand NNP testing |
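
[Editor's annotation, not part of the patch above] The wait-killable tests install a user-notification filter with the new SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV flag via the user_notif_syscall() helper. For reference, the sketch below shows the same pattern outside the kselftest harness. It is an illustrative example under the assumption that the running kernel supports the flag (older kernels reject it with EINVAL, which callers should treat as "unsupported"); the fallback #define mirrors the one the patch adds to seccomp_bpf.c, and the helper name is the editor's own.

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV
#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5)
#endif

/*
 * Install a filter that sends the given syscall to a userspace supervisor.
 * Returns the notification listener fd, or -1 on failure (errno == EINVAL
 * when user notification or the wait-killable flag is unsupported).
 * A production filter would also validate seccomp_data->arch; this sketch
 * keeps only the syscall-number check for brevity.
 */
static int install_wait_killable_listener(int sysno)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, sysno, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_USER_NOTIF),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)(sizeof(filter) / sizeof(filter[0])),
		.filter = filter,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_NEW_LISTENER |
		       SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV,
		       &prog);
}

A supervising process then services requests with ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req) and SECCOMP_IOCTL_NOTIF_SEND, exactly as the tests above do; once a notification has been received, the blocked target can only be interrupted by fatal signals, which is what user_notification_wait_killable and user_notification_wait_killable_fatal verify.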
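
[Editor's annotation, not part of the patch above] The new layout1_bind.reparent_cross_mount test added earlier in this diff checks that, once LANDLOCK_ACCESS_FS_REFER is handled by an enforced ruleset, disallowed cross-directory renames are reported as EXDEV just like genuine cross-mount moves. The standalone sketch below illustrates that userspace pattern; it is an editor's example, not code from the patch, it assumes a kernel and UAPI headers with Landlock ABI 2 or later, and the /tmp/... paths are hypothetical placeholders.

#include <errno.h>
#include <fcntl.h>
#include <linux/landlock.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef LANDLOCK_ACCESS_FS_REFER
#define LANDLOCK_ACCESS_FS_REFER (1ULL << 13)	/* fallback for older headers */
#endif

int main(void)
{
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_REFER,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_REFER,
	};
	int ruleset_fd;

	/* Raw syscalls are used because libc wrappers may be missing. */
	ruleset_fd = syscall(__NR_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);
	if (ruleset_fd < 0)
		return 1;	/* Landlock absent or ABI too old for REFER */

	/* Only the (hypothetical) /tmp/allowed hierarchy may be a rename or
	 * link destination. */
	path_beneath.parent_fd = open("/tmp/allowed", O_PATH | O_CLOEXEC);
	if (path_beneath.parent_fd < 0 ||
	    syscall(__NR_landlock_add_rule, ruleset_fd,
		    LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0))
		return 1;

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    syscall(__NR_landlock_restrict_self, ruleset_fd, 0))
		return 1;
	close(path_beneath.parent_fd);
	close(ruleset_fd);

	/* Assuming both paths exist, moving a file out of the allowed
	 * hierarchy is now refused with EXDEV, mimicking a cross-mount move. */
	if (rename("/tmp/allowed/file", "/tmp/elsewhere/file") == -1)
		printf("rename: %s\n", strerror(errno));
	return 0;
}

In the test above the same situation is set up with bind mounts, so the Landlock denial and the real cross-mount move both surface as the identical EXDEV error, while the legitimate downgrade move still succeeds.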