Diffstat (limited to 'tools')
-rw-r--r--tools/include/linux/static_call_types.h4
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c2
-rw-r--r--tools/power/x86/intel-speed-select/isst-core-tpmi.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/socket_helpers.h9
-rw-r--r--tools/testing/selftests/futex/functional/.gitignore1
-rw-r--r--tools/testing/selftests/futex/functional/Makefile3
-rw-r--r--tools/testing/selftests/futex/functional/futex_numa_mpol.c5
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait.c8
-rw-r--r--tools/testing/selftests/futex/functional/futex_waitv.c2
-rw-r--r--tools/testing/selftests/futex/functional/robust_list.c552
-rw-r--r--tools/virtio/linux/compiler.h2
11 files changed, 619 insertions, 15 deletions
diff --git a/tools/include/linux/static_call_types.h b/tools/include/linux/static_call_types.h
index 5a00b8b2cf9f..cfb6ddeb292b 100644
--- a/tools/include/linux/static_call_types.h
+++ b/tools/include/linux/static_call_types.h
@@ -25,6 +25,8 @@
#define STATIC_CALL_SITE_INIT 2UL /* init section */
#define STATIC_CALL_SITE_FLAGS 3UL
+#ifndef __ASSEMBLY__
+
/*
* The static call site table needs to be created by external tooling (objtool
* or a compiler plugin).
@@ -100,4 +102,6 @@ struct static_call_key {
#endif /* CONFIG_HAVE_STATIC_CALL */
+#endif /* __ASSEMBLY__ */
+
#endif /* _STATIC_CALL_TYPES_H */
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 0ce251b8d466..558138eea75e 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -16,7 +16,7 @@ struct process_cmd_struct {
int arg;
};
-static const char *version_str = "v1.23";
+static const char *version_str = "v1.24";
static const int supported_api_ver = 3;
static struct isst_if_platform_info isst_platform_info;
diff --git a/tools/power/x86/intel-speed-select/isst-core-tpmi.c b/tools/power/x86/intel-speed-select/isst-core-tpmi.c
index 4f389e1c0525..ebaad0dc8ca6 100644
--- a/tools/power/x86/intel-speed-select/isst-core-tpmi.c
+++ b/tools/power/x86/intel-speed-select/isst-core-tpmi.c
@@ -452,13 +452,16 @@ static int tpmi_get_pbf_info(struct isst_id *id, int level,
return _pbf_get_coremask_info(id, level, pbf_info);
}
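+/*
+ * After toggling a feature, poll the status for up to FEATURE_ENABLE_RETRIES
+ * attempts, FEATURE_ENABLE_WAIT_US apart, to give the firmware time to apply
+ * the change
+ */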
+#define FEATURE_ENABLE_WAIT_US 1000
+#define FEATURE_ENABLE_RETRIES 5
+
static int tpmi_set_pbf_fact_status(struct isst_id *id, int pbf, int enable)
{
struct isst_pkg_ctdp pkg_dev;
struct isst_pkg_ctdp_level_info ctdp_level;
int current_level;
struct isst_perf_feature_control info;
- int ret;
+ int ret, i;
ret = isst_get_ctdp_levels(id, &pkg_dev);
if (ret)
@@ -503,6 +506,30 @@ static int tpmi_set_pbf_fact_status(struct isst_id *id, int pbf, int enable)
if (ret == -1)
return ret;
+ for (i = 0; i < FEATURE_ENABLE_RETRIES; ++i) {
+
+ usleep(FEATURE_ENABLE_WAIT_US);
+
+ /* Check status */
+ ret = isst_get_ctdp_control(id, current_level, &ctdp_level);
+ if (ret)
+ return ret;
+
+ debug_printf("pbf_enabled:%d fact_enabled:%d\n",
+ ctdp_level.pbf_enabled, ctdp_level.fact_enabled);
+
+ if (pbf) {
+ if (ctdp_level.pbf_enabled == enable)
+ break;
+ } else {
+ if (ctdp_level.fact_enabled == enable)
+ break;
+ }
+ }
+
+ if (i == FEATURE_ENABLE_RETRIES)
+ return -1;
+
return 0;
}
@@ -513,6 +540,7 @@ static int tpmi_get_fact_info(struct isst_id *id, int level, int fact_bucket,
int i, j;
int ret;
+ memset(&info, 0, sizeof(info));
info.socket_id = id->pkg;
info.power_domain_id = id->punit;
info.level = level;
@@ -659,7 +687,8 @@ static int tpmi_pm_qos_config(struct isst_id *id, int enable_clos,
int priority_type)
{
struct isst_core_power info;
- int i, ret, saved_punit;
+ int cp_state = 0, cp_cap = 0;
+ int i, j, ret, saved_punit;
info.get_set = 1;
info.socket_id = id->pkg;
@@ -679,6 +708,19 @@ static int tpmi_pm_qos_config(struct isst_id *id, int enable_clos,
id->punit = saved_punit;
return ret;
}
+ /* Get status */
+ for (j = 0; j < FEATURE_ENABLE_RETRIES; ++j) {
+ usleep(FEATURE_ENABLE_WAIT_US);
+ ret = tpmi_read_pm_config(id, &cp_state, &cp_cap);
+ debug_printf("ret:%d cp_state:%d enable_clos:%d\n", ret,
+ cp_state, enable_clos);
+ if (ret || cp_state == enable_clos)
+ break;
+ }
+ if (j == FEATURE_ENABLE_RETRIES) {
+ id->punit = saved_punit;
+ return -1;
+ }
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h
index e02cabcc814e..0d59503a0c73 100644
--- a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h
@@ -17,11 +17,16 @@
#define VMADDR_CID_LOCAL 1
#endif
+/* include/linux/compiler_types.h */
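+/*
+ * C23 (202311L) gave the 'auto' keyword type-inference semantics; on older
+ * standards, fall back to the equivalent GNU __auto_type extension
+ */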
+#if __STDC_VERSION__ < 202311L && !defined(auto)
+# define auto __auto_type
+#endif
+
/* include/linux/cleanup.h */
#define __get_and_null(p, nullvalue) \
({ \
- __auto_type __ptr = &(p); \
- __auto_type __val = *__ptr; \
+ auto __ptr = &(p); \
+ auto __val = *__ptr; \
*__ptr = nullvalue; \
__val; \
})
diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore
index 776ad658f75e..23b9fea8d190 100644
--- a/tools/testing/selftests/futex/functional/.gitignore
+++ b/tools/testing/selftests/futex/functional/.gitignore
@@ -12,3 +12,4 @@ futex_wait_uninitialized_heap
futex_wait_wouldblock
futex_waitv
futex_numa
+robust_list
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index 490ace1f017e..af7ec309ea78 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -22,7 +22,8 @@ TEST_GEN_PROGS := \
futex_priv_hash \
futex_numa_mpol \
futex_waitv \
- futex_numa
+ futex_numa \
+ robust_list
TEST_PROGS := run.sh
diff --git a/tools/testing/selftests/futex/functional/futex_numa_mpol.c b/tools/testing/selftests/futex/functional/futex_numa_mpol.c
index ab8555752137..220ef219c823 100644
--- a/tools/testing/selftests/futex/functional/futex_numa_mpol.c
+++ b/tools/testing/selftests/futex/functional/futex_numa_mpol.c
@@ -131,11 +131,6 @@ static void test_futex(void *futex_ptr, int err_value)
__test_futex(futex_ptr, err_value, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA);
}
-static void test_futex_mpol(void *futex_ptr, int err_value)
-{
- __test_futex(futex_ptr, err_value, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA | FUTEX2_MPOL);
-}
-
TEST(futex_numa_mpol)
{
struct futex32_numa *futex_numa;
diff --git a/tools/testing/selftests/futex/functional/futex_wait.c b/tools/testing/selftests/futex/functional/futex_wait.c
index 0e69c53524c1..7b8879409007 100644
--- a/tools/testing/selftests/futex/functional/futex_wait.c
+++ b/tools/testing/selftests/futex/functional/futex_wait.c
@@ -71,6 +71,8 @@ TEST(anon_page)
/* Testing an anon page shared memory */
shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
if (shm_id < 0) {
+ if (errno == ENOSYS)
+ ksft_exit_skip("shmget syscall not supported\n");
perror("shmget");
exit(1);
}
@@ -108,14 +110,14 @@ TEST(file_backed)
/* Testing a file backed shared memory */
fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
if (fd < 0)
- ksft_exit_fail_msg("open");
+ ksft_exit_fail_msg("open\n");
if (ftruncate(fd, sizeof(f_private)))
- ksft_exit_fail_msg("ftruncate");
+ ksft_exit_fail_msg("ftruncate\n");
shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (shm == MAP_FAILED)
- ksft_exit_fail_msg("mmap");
+ ksft_exit_fail_msg("mmap\n");
memcpy(shm, &f_private, sizeof(f_private));
diff --git a/tools/testing/selftests/futex/functional/futex_waitv.c b/tools/testing/selftests/futex/functional/futex_waitv.c
index d60876164d4b..b5ada9fdb26f 100644
--- a/tools/testing/selftests/futex/functional/futex_waitv.c
+++ b/tools/testing/selftests/futex/functional/futex_waitv.c
@@ -86,6 +86,8 @@ TEST(shared_waitv)
int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
if (shm_id < 0) {
+ if (errno == ENOSYS)
+ ksft_exit_skip("shmget syscall not supported\n");
perror("shmget");
exit(1);
}
diff --git a/tools/testing/selftests/futex/functional/robust_list.c b/tools/testing/selftests/futex/functional/robust_list.c
new file mode 100644
index 000000000000..e7d1254e18ca
--- /dev/null
+++ b/tools/testing/selftests/futex/functional/robust_list.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2025 Igalia S.L.
+ *
+ * Robust list test by André Almeida <andrealmeid@igalia.com>
+ *
+ * The robust list uAPI allows userspace to create "robust" locks, in the sense
+ * that if the lock holder thread dies, the remaining threads that are waiting
+ * for the lock won't block forever, waiting for a lock that will never be
+ * released.
+ *
+ * This is achieved by userspace maintaining a list in which a thread enters all
+ * the locks (futexes) that it is holding. The robust list is a linked list, and
+ * userspace registers the start of the list with the syscall set_robust_list().
+ * If such a thread eventually dies, the kernel will walk this list, waking up one
+ * thread waiting for each futex and marking the futex word with the flag
+ * FUTEX_OWNER_DIED.
+ *
+ * See also
+ * man set_robust_list
+ *   Documentation/locking/robust-futex-ABI.rst
+ *   Documentation/locking/robust-futexes.rst
+ */
+
+#define _GNU_SOURCE
+
+#include "futextest.h"
+#include "../../kselftest_harness.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+
+#define STACK_SIZE (1024 * 1024)
+
+#define FUTEX_TIMEOUT 3
+
+#define SLEEP_US 100
+
+static pthread_barrier_t barrier, barrier2;
+
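+/*
+ * Glibc provides no wrappers for the robust list syscalls, so invoke them
+ * directly via syscall()
+ */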
+static int set_robust_list(struct robust_list_head *head, size_t len)
+{
+ return syscall(SYS_set_robust_list, head, len);
+}
+
+static int get_robust_list(int pid, struct robust_list_head **head, size_t *len_ptr)
+{
+ return syscall(SYS_get_robust_list, pid, head, len_ptr);
+}
+
+/*
+ * Basic lock struct, containing just the futex word and the robust list element.
+ * Real implementations also have a *prev pointer, to make walking the list easier.
+ */
+struct lock_struct {
+ _Atomic(unsigned int) futex;
+ struct robust_list list;
+};
+
+/*
+ * Helper function to spawn a child thread. Returns -1 on error, pid on success
+ */
+static int create_child(int (*fn)(void *arg), void *arg)
+{
+ char *stack;
+ pid_t pid;
+
+ stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+ if (stack == MAP_FAILED)
+ return -1;
+
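+	/*
+	 * clone() expects a pointer to the top of the child's stack, since
+	 * stacks grow down on most architectures
+	 */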
+ stack += STACK_SIZE;
+
+ pid = clone(fn, stack, CLONE_VM | SIGCHLD, arg);
+
+ if (pid == -1)
+ return -1;
+
+ return pid;
+}
+
+/*
+ * Helper function to prepare and register a robust list
+ */
+static int set_list(struct robust_list_head *head)
+{
+ int ret;
+
+ ret = set_robust_list(head, sizeof(*head));
+ if (ret)
+ return ret;
+
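+	/*
+	 * futex_offset is the signed offset from each list entry to its futex
+	 * word; it is negative here, since the futex precedes the list member
+	 * in struct lock_struct
+	 */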
+ head->futex_offset = (size_t) offsetof(struct lock_struct, futex) -
+ (size_t) offsetof(struct lock_struct, list);
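+
+	/* An empty robust list is circular: the head points back to itself */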
+ head->list.next = &head->list;
+ head->list_op_pending = NULL;
+
+ return 0;
+}
+
+/*
+ * A basic (and incomplete) mutex lock function with robustness
+ */
+static int mutex_lock(struct lock_struct *lock, struct robust_list_head *head, bool error_inject)
+{
+ _Atomic(unsigned int) *futex = &lock->futex;
+ unsigned int zero = 0;
+ pid_t tid = gettid();
+ int ret = -1;
+
+ /*
+ * Set list_op_pending before starting the lock, so the kernel can catch
 * the case where the thread dies during the lock operation
+ */
+ head->list_op_pending = &lock->list;
+
+ if (atomic_compare_exchange_strong(futex, &zero, tid)) {
+ /*
+ * We took the lock, insert it in the robust list
+ */
+ struct robust_list *list = &head->list;
+
+ /* Error injection to test list_op_pending */
+ if (error_inject)
+ return 0;
+
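+		/*
+		 * Walk to the tail of the circular list and splice the new
+		 * lock in as the last element, pointing back to the head
+		 */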
+ while (list->next != &head->list)
+ list = list->next;
+
+ list->next = &lock->list;
+ lock->list.next = &head->list;
+
+ ret = 0;
+ } else {
+ /*
+		 * We didn't get the lock; wait until the owner wakes us up (or dies)
+ */
+ struct timespec to;
+
+ to.tv_sec = FUTEX_TIMEOUT;
+ to.tv_nsec = 0;
+
+ tid = atomic_load(futex);
+ /* Kernel ignores futexes without the waiters flag */
+ tid |= FUTEX_WAITERS;
+ atomic_store(futex, tid);
+
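+		/*
+		 * Block until the owner wakes us up (handle_futex_death()
+		 * does so if the owner dies) or the timeout expires
+		 */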
+ ret = futex_wait((futex_t *) futex, tid, &to, 0);
+
+ /*
+ * A real mutex_lock() implementation would loop here to finally
+ * take the lock. We don't care about that, so we stop here.
+ */
+ }
+
+ head->list_op_pending = NULL;
+
+ return ret;
+}
+
+/*
+ * This child thread will succeed in taking the lock, and then exit while holding it
+ */
+static int child_fn_lock(void *arg)
+{
+ struct lock_struct *lock = arg;
+ struct robust_list_head head;
+ int ret;
+
+ ret = set_list(&head);
+ if (ret) {
+ ksft_test_result_fail("set_robust_list error\n");
+ return ret;
+ }
+
+ ret = mutex_lock(lock, &head, false);
+ if (ret) {
+ ksft_test_result_fail("mutex_lock error\n");
+ return ret;
+ }
+
+ pthread_barrier_wait(&barrier);
+
+ /*
+ * There's a race here: the parent thread needs to be inside
+ * futex_wait() before the child thread dies, otherwise it will miss the
+ * wakeup from handle_futex_death() that this child will emit. We wait a
+ * little bit just to make sure that this happens.
+ */
+ usleep(SLEEP_US);
+
+ return 0;
+}
+
+/*
+ * Spawns a child thread that will set a robust list, take the lock, register it
+ * in the robust list and die. The parent thread will wait on this futex, and
+ * should be woken up when the child exits.
+ */
+TEST(test_robustness)
+{
+ struct lock_struct lock = { .futex = 0 };
+ _Atomic(unsigned int) *futex = &lock.futex;
+ struct robust_list_head head;
+ int ret, pid, wstatus;
+
+ ret = set_list(&head);
+ ASSERT_EQ(ret, 0);
+
+ /*
+	 * Let's use a barrier to ensure that the child thread takes the lock
+ * before the parent
+ */
+ ret = pthread_barrier_init(&barrier, NULL, 2);
+ ASSERT_EQ(ret, 0);
+
+ pid = create_child(&child_fn_lock, &lock);
+ ASSERT_NE(pid, -1);
+
+ pthread_barrier_wait(&barrier);
+ ret = mutex_lock(&lock, &head, false);
+
+ /*
+ * futex_wait() should return 0 and the futex word should be marked with
+ * FUTEX_OWNER_DIED
+ */
+ ASSERT_EQ(ret, 0);
+
+ ASSERT_TRUE(*futex & FUTEX_OWNER_DIED);
+
+ wait(&wstatus);
+ pthread_barrier_destroy(&barrier);
+
+	/* Pass only if the child hasn't returned an error */
+ if (!WEXITSTATUS(wstatus))
+ ksft_test_result_pass("%s\n", __func__);
+}
+
+/*
+ * The only valid value for len is sizeof(*head)
+ */
+TEST(test_set_robust_list_invalid_size)
+{
+ struct robust_list_head head;
+ size_t head_size = sizeof(head);
+ int ret;
+
+ ret = set_robust_list(&head, head_size);
+ ASSERT_EQ(ret, 0);
+
+ ret = set_robust_list(&head, head_size * 2);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EINVAL);
+
+ ret = set_robust_list(&head, head_size - 1);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EINVAL);
+
+ ret = set_robust_list(&head, 0);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EINVAL);
+
+ ksft_test_result_pass("%s\n", __func__);
+}
+
+/*
+ * Test get_robust_list with pid = 0, getting the list of the calling thread
+ */
+TEST(test_get_robust_list_self)
+{
+ struct robust_list_head head, head2, *get_head;
+ size_t head_size = sizeof(head), len_ptr;
+ int ret;
+
+ ret = set_robust_list(&head, head_size);
+ ASSERT_EQ(ret, 0);
+
+ ret = get_robust_list(0, &get_head, &len_ptr);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(get_head, &head);
+ ASSERT_EQ(head_size, len_ptr);
+
+ ret = set_robust_list(&head2, head_size);
+ ASSERT_EQ(ret, 0);
+
+ ret = get_robust_list(0, &get_head, &len_ptr);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(get_head, &head2);
+ ASSERT_EQ(head_size, len_ptr);
+
+ ksft_test_result_pass("%s\n", __func__);
+}
+
+static int child_list(void *arg)
+{
+ struct robust_list_head *head = arg;
+ int ret;
+
+ ret = set_robust_list(head, sizeof(*head));
+ if (ret) {
+ ksft_test_result_fail("set_robust_list error\n");
+ return -1;
+ }
+
+ /*
+ * After setting the list head, wait until the main thread can call
+ * get_robust_list() for this thread before exiting.
+ */
+ pthread_barrier_wait(&barrier);
+ pthread_barrier_wait(&barrier2);
+
+ return 0;
+}
+
+/*
+ * Test get_robust_list from another thread. We use two barriers here to ensure
+ * that:
+ *   1) the child thread sets the list before we try to get it from the
+ *      parent
+ *   2) the child thread is still alive when we try to get the list from it
+ */
+TEST(test_get_robust_list_child)
+{
+ struct robust_list_head head, *get_head;
+ int ret, wstatus;
+ size_t len_ptr;
+ pid_t tid;
+
+	ret = pthread_barrier_init(&barrier, NULL, 2);
+	ASSERT_EQ(ret, 0);
+	ret = pthread_barrier_init(&barrier2, NULL, 2);
+	ASSERT_EQ(ret, 0);
+
+ tid = create_child(&child_list, &head);
+ ASSERT_NE(tid, -1);
+
+ pthread_barrier_wait(&barrier);
+
+ ret = get_robust_list(tid, &get_head, &len_ptr);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(&head, get_head);
+
+ pthread_barrier_wait(&barrier2);
+
+ wait(&wstatus);
+ pthread_barrier_destroy(&barrier);
+ pthread_barrier_destroy(&barrier2);
+
+	/* Pass only if the child hasn't returned an error */
+ if (!WEXITSTATUS(wstatus))
+ ksft_test_result_pass("%s\n", __func__);
+}
+
+static int child_fn_lock_with_error(void *arg)
+{
+ struct lock_struct *lock = arg;
+ struct robust_list_head head;
+ int ret;
+
+ ret = set_list(&head);
+ if (ret) {
+ ksft_test_result_fail("set_robust_list error\n");
+ return -1;
+ }
+
+ ret = mutex_lock(lock, &head, true);
+ if (ret) {
+ ksft_test_result_fail("mutex_lock error\n");
+ return -1;
+ }
+
+ pthread_barrier_wait(&barrier);
+
+ /* See comment at child_fn_lock() */
+ usleep(SLEEP_US);
+
+ return 0;
+}
+
+/*
+ * Same as the robustness test, but inject an error so that mutex_lock() exits
+ * early, just after setting list_op_pending and taking the lock, to test the
+ * list_op_pending mechanism
+ */
+TEST(test_set_list_op_pending)
+{
+ struct lock_struct lock = { .futex = 0 };
+ _Atomic(unsigned int) *futex = &lock.futex;
+ struct robust_list_head head;
+ int ret, wstatus;
+
+ ret = set_list(&head);
+ ASSERT_EQ(ret, 0);
+
+ ret = pthread_barrier_init(&barrier, NULL, 2);
+ ASSERT_EQ(ret, 0);
+
+ ret = create_child(&child_fn_lock_with_error, &lock);
+ ASSERT_NE(ret, -1);
+
+ pthread_barrier_wait(&barrier);
+ ret = mutex_lock(&lock, &head, false);
+
+ ASSERT_EQ(ret, 0);
+
+ ASSERT_TRUE(*futex & FUTEX_OWNER_DIED);
+
+ wait(&wstatus);
+ pthread_barrier_destroy(&barrier);
+
+	/* Pass only if the child hasn't returned an error */
+ if (!WEXITSTATUS(wstatus))
+ ksft_test_result_pass("%s\n", __func__);
+ else
+ ksft_test_result_fail("%s\n", __func__);
+}
+
+#define CHILD_NR 10
+
+static int child_lock_holder(void *arg)
+{
+ struct lock_struct *locks = arg;
+ struct robust_list_head head;
+ int i;
+
+ set_list(&head);
+
+ for (i = 0; i < CHILD_NR; i++) {
+ locks[i].futex = 0;
+ mutex_lock(&locks[i], &head, false);
+ }
+
+ pthread_barrier_wait(&barrier);
+ pthread_barrier_wait(&barrier2);
+
+ /* See comment at child_fn_lock() */
+ usleep(SLEEP_US);
+
+ return 0;
+}
+
+static int child_wait_lock(void *arg)
+{
+ struct lock_struct *lock = arg;
+ struct robust_list_head head;
+ int ret;
+
+ pthread_barrier_wait(&barrier2);
+ ret = mutex_lock(lock, &head, false);
+
+ if (ret) {
+ ksft_test_result_fail("mutex_lock error\n");
+ return -1;
+ }
+
+ if (!(lock->futex & FUTEX_OWNER_DIED)) {
+ ksft_test_result_fail("futex not marked with FUTEX_OWNER_DIED\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Test a robust list of more than one element. All the waiters should wake when
+ * the holder dies
+ */
+TEST(test_robust_list_multiple_elements)
+{
+ struct lock_struct locks[CHILD_NR];
+ pid_t pids[CHILD_NR + 1];
+ int i, ret, wstatus;
+
+ ret = pthread_barrier_init(&barrier, NULL, 2);
+ ASSERT_EQ(ret, 0);
+ ret = pthread_barrier_init(&barrier2, NULL, CHILD_NR + 1);
+ ASSERT_EQ(ret, 0);
+
+ pids[0] = create_child(&child_lock_holder, &locks);
+
+	/* Wait until the locker thread takes the lock */
+ pthread_barrier_wait(&barrier);
+
+ for (i = 0; i < CHILD_NR; i++)
+ pids[i+1] = create_child(&child_wait_lock, &locks[i]);
+
+ /* Wait for all children to return */
+ ret = 0;
+
+	for (i = 0; i < CHILD_NR + 1; i++) {
+ waitpid(pids[i], &wstatus, 0);
+ if (WEXITSTATUS(wstatus))
+ ret = -1;
+ }
+
+ pthread_barrier_destroy(&barrier);
+ pthread_barrier_destroy(&barrier2);
+
+	/* Pass only if no child returned an error */
+ if (!ret)
+ ksft_test_result_pass("%s\n", __func__);
+}
+
+static int child_circular_list(void *arg)
+{
+ static struct robust_list_head head;
+ struct lock_struct a, b, c;
+ int ret;
+
+ ret = set_list(&head);
+ if (ret) {
+ ksft_test_result_fail("set_list error\n");
+ return -1;
+ }
+
+ head.list.next = &a.list;
+
+ /*
+	 * The last element should point back to the list head, but we short-circuit it
+ */
+ a.list.next = &b.list;
+ b.list.next = &c.list;
+ c.list.next = &a.list;
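+
+	/*
+	 * The kernel caps its exit-time walk of the list (ROBUST_LIST_LIMIT),
+	 * so this cycle must not hang it
+	 */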
+
+ return 0;
+}
+
+/*
+ * Create a circular robust list. The kernel should be able to detect this while
+ * processing the list, so it won't be trapped in an infinite loop while handling
+ * a process exit
+ */
+TEST(test_circular_list)
+{
+ int wstatus;
+
+ create_child(child_circular_list, NULL);
+
+ wait(&wstatus);
+
+	/* Pass only if the child hasn't returned an error */
+ if (!WEXITSTATUS(wstatus))
+ ksft_test_result_pass("%s\n", __func__);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/virtio/linux/compiler.h b/tools/virtio/linux/compiler.h
index 204ef0e9f542..725b93bfeee1 100644
--- a/tools/virtio/linux/compiler.h
+++ b/tools/virtio/linux/compiler.h
@@ -31,7 +31,7 @@
*/
#define data_race(expr) \
({ \
- __auto_type __v = (expr); \
+ auto __v = (expr); \
__v; \
})