Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig.debug16
-rw-r--r--lib/crypto/s390/chacha-glue.c1
-rw-r--r--lib/kobject_uevent.c20
-rw-r--r--lib/kunit/Kconfig13
-rw-r--r--lib/kunit/kunit-test.c55
-rw-r--r--lib/kunit/test.c47
-rw-r--r--lib/kunit/try-catch-impl.h4
-rw-r--r--lib/kunit/try-catch.c29
-rw-r--r--lib/kunit/user_alloc.c4
-rw-r--r--lib/raid6/recov_s390xc.c1
-rw-r--r--lib/ref_tracker.c289
-rw-r--r--lib/smp_processor_id.c2
-rw-r--r--lib/test_objagg.c77
-rw-r--r--lib/tests/Makefile1
-rw-r--r--lib/tests/longest_symbol_kunit.c3
-rw-r--r--lib/tests/test_ratelimit.c144
-rw-r--r--lib/vdso/gettimeofday.c224
17 files changed, 756 insertions, 174 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8d969b250b18..53332a1d8af4 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2515,8 +2515,8 @@ config TEST_IDA
tristate "Perform selftest on IDA functions"
config TEST_MISC_MINOR
- tristate "miscdevice KUnit test" if !KUNIT_ALL_TESTS
- depends on KUNIT
+ bool "miscdevice KUnit test" if !KUNIT_ALL_TESTS
+ depends on KUNIT=y
default KUNIT_ALL_TESTS
help
Kunit test for miscdevice API, specially its behavior in respect to
@@ -2894,6 +2894,7 @@ config FORTIFY_KUNIT_TEST
config LONGEST_SYM_KUNIT_TEST
tristate "Test the longest symbol possible" if !KUNIT_ALL_TESTS
depends on KUNIT && KPROBES
+ depends on !PREFIX_SYMBOLS && !CFI_CLANG && !GCOV_KERNEL
default KUNIT_ALL_TESTS
help
Tests the longest symbol possible
@@ -3213,6 +3214,17 @@ config TEST_OBJPOOL
If unsure, say N.
+config RATELIMIT_KUNIT_TEST
+ tristate "KUnit Test for correctness and stress of ratelimit" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the "test_ratelimit" module that should be used
+ for correctness verification and concurrent testing of rate
+ limiting.
+
+ If unsure, say N.
+
config INT_POW_KUNIT_TEST
tristate "Integer exponentiation (int_pow) test" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/lib/crypto/s390/chacha-glue.c b/lib/crypto/s390/chacha-glue.c
index f95ba3483bbc..c57dc851214f 100644
--- a/lib/crypto/s390/chacha-glue.c
+++ b/lib/crypto/s390/chacha-glue.c
@@ -10,6 +10,7 @@
#include <crypto/chacha.h>
#include <linux/cpufeature.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index b7f2fa08d9c8..78e16b95d210 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -826,3 +826,23 @@ static int __init kobject_uevent_init(void)
postcore_initcall(kobject_uevent_init);
#endif
+
+#ifdef CONFIG_UEVENT_HELPER
+static const struct ctl_table uevent_helper_sysctl_table[] = {
+ {
+ .procname = "hotplug",
+ .data = &uevent_helper,
+ .maxlen = UEVENT_HELPER_PATH_LEN,
+ .mode = 0644,
+ .proc_handler = proc_dostring,
+ },
+};
+
+static int __init init_uevent_helper_sysctl(void)
+{
+ register_sysctl_init("kernel", uevent_helper_sysctl_table);
+ return 0;
+}
+
+postcore_initcall(init_uevent_helper_sysctl);
+#endif
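
The hunk above moves the uevent helper's "hotplug" entry onto register_sysctl_init(). The same registration pattern works for any boot-time knob; a minimal sketch, assuming a hypothetical "my_knob" string exposed under /proc/sys/kernel/:

#include <linux/sysctl.h>
#include <linux/init.h>

static char my_knob[64] = "default";

static const struct ctl_table my_knob_sysctl_table[] = {
	{
		.procname	= "my_knob",	/* /proc/sys/kernel/my_knob */
		.data		= my_knob,
		.maxlen		= sizeof(my_knob),
		.mode		= 0644,
		.proc_handler	= proc_dostring,
	},
};

static int __init my_knob_sysctl_init(void)
{
	/* register_sysctl_init() is for tables that are never unregistered */
	register_sysctl_init("kernel", my_knob_sysctl_table);
	return 0;
}
postcore_initcall(my_knob_sysctl_init);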
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index a97897edd964..c10ede4b1d22 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -93,4 +93,17 @@ config KUNIT_AUTORUN_ENABLED
In most cases this should be left as Y. Only if additional opt-in
behavior is needed should this be set to N.
+config KUNIT_DEFAULT_TIMEOUT
+ int "Default value of the timeout module parameter"
+ default 300
+ help
+ Sets the default timeout, in seconds, for KUnit test cases. This value
+ is further multiplied by a factor determined by the assigned speed
+ setting: 1x for `DEFAULT`, 3x for `KUNIT_SPEED_SLOW`, and 12x for
+ `KUNIT_SPEED_VERY_SLOW`. This allows slower tests on slower machines
+ sufficient time to complete.
+
+ If unsure, the default timeout of 300 seconds is suitable for most
+ cases.
+
endif # KUNIT
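
With the default of 300 seconds, the effective per-case budgets therefore work out to 300 s for normal tests, 900 s for KUNIT_SPEED_SLOW, and 3600 s for KUNIT_SPEED_VERY_SLOW; setting CONFIG_KUNIT_DEFAULT_TIMEOUT=60 would scale those to 60 s, 180 s, and 720 s respectively.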
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index d9c781c859fd..8c01eabd4eaf 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -8,6 +8,7 @@
#include "linux/gfp_types.h"
#include <kunit/test.h>
#include <kunit/test-bug.h>
+#include <kunit/static_stub.h>
#include <linux/device.h>
#include <kunit/device.h>
@@ -43,7 +44,8 @@ static void kunit_test_try_catch_successful_try_no_catch(struct kunit *test)
kunit_try_catch_init(try_catch,
test,
kunit_test_successful_try,
- kunit_test_no_catch);
+ kunit_test_no_catch,
+ 300 * msecs_to_jiffies(MSEC_PER_SEC));
kunit_try_catch_run(try_catch, test);
KUNIT_EXPECT_TRUE(test, ctx->function_called);
@@ -75,7 +77,8 @@ static void kunit_test_try_catch_unsuccessful_try_does_catch(struct kunit *test)
kunit_try_catch_init(try_catch,
test,
kunit_test_unsuccessful_try,
- kunit_test_catch);
+ kunit_test_catch,
+ 300 * msecs_to_jiffies(MSEC_PER_SEC));
kunit_try_catch_run(try_catch, test);
KUNIT_EXPECT_TRUE(test, ctx->function_called);
@@ -129,7 +132,8 @@ static void kunit_test_fault_null_dereference(struct kunit *test)
kunit_try_catch_init(try_catch,
test,
kunit_test_null_dereference,
- kunit_test_catch);
+ kunit_test_catch,
+ 300 * msecs_to_jiffies(MSEC_PER_SEC));
kunit_try_catch_run(try_catch, test);
KUNIT_EXPECT_EQ(test, try_catch->try_result, -EINTR);
@@ -868,10 +872,53 @@ static struct kunit_suite kunit_current_test_suite = {
.test_cases = kunit_current_test_cases,
};
+static void kunit_stub_test(struct kunit *test)
+{
+ struct kunit fake_test;
+ const unsigned long fake_real_fn_addr = 0x1234;
+ const unsigned long fake_replacement_addr = 0x5678;
+ struct kunit_resource *res;
+ struct {
+ void *real_fn_addr;
+ void *replacement_addr;
+ } *stub_ctx;
+
+ kunit_init_test(&fake_test, "kunit_stub_fake_test", NULL);
+ KUNIT_ASSERT_EQ(test, fake_test.status, KUNIT_SUCCESS);
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&fake_test.resources), 0);
+
+ __kunit_activate_static_stub(&fake_test, (void *)fake_real_fn_addr,
+ (void *)fake_replacement_addr);
+ KUNIT_ASSERT_EQ(test, fake_test.status, KUNIT_SUCCESS);
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&fake_test.resources), 1);
+
+ res = list_first_entry(&fake_test.resources, struct kunit_resource, node);
+ KUNIT_EXPECT_NOT_NULL(test, res);
+
+ stub_ctx = res->data;
+ KUNIT_EXPECT_NOT_NULL(test, stub_ctx);
+ KUNIT_EXPECT_EQ(test, (unsigned long)stub_ctx->real_fn_addr, fake_real_fn_addr);
+ KUNIT_EXPECT_EQ(test, (unsigned long)stub_ctx->replacement_addr, fake_replacement_addr);
+
+ __kunit_activate_static_stub(&fake_test, (void *)fake_real_fn_addr, NULL);
+ KUNIT_ASSERT_EQ(test, fake_test.status, KUNIT_SUCCESS);
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&fake_test.resources), 0);
+}
+
+static struct kunit_case kunit_stub_test_cases[] = {
+ KUNIT_CASE(kunit_stub_test),
+ {}
+};
+
+static struct kunit_suite kunit_stub_test_suite = {
+ .name = "kunit_stub",
+ .test_cases = kunit_stub_test_cases,
+};
+
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
&kunit_log_test_suite, &kunit_status_test_suite,
&kunit_current_test_suite, &kunit_device_test_suite,
- &kunit_fault_test_suite);
+ &kunit_fault_test_suite, &kunit_stub_test_suite);
MODULE_DESCRIPTION("KUnit test for core test infrastructure");
MODULE_LICENSE("GPL v2");
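
The new kunit_stub test above drives the internal __kunit_activate_static_stub() entry point directly; consumers normally use the type-checked public wrappers. A minimal sketch of that usage, where send_data() and fake_send_data() are hypothetical:

#include <kunit/static_stub.h>
#include <kunit/test.h>

/* Hypothetical "real" function; it opts in to redirection under KUnit. */
static int send_data(int value)
{
	KUNIT_STATIC_STUB_REDIRECT(send_data, value);
	return -EIO;	/* stand-in for a real hardware access */
}

/* Hypothetical replacement installed for the duration of a test. */
static int fake_send_data(int value)
{
	return 0;	/* pretend the device accepted the data */
}

static void my_driver_test(struct kunit *test)
{
	/* Type-checked wrapper around __kunit_activate_static_stub(). */
	kunit_activate_static_stub(test, send_data, fake_send_data);
	KUNIT_EXPECT_EQ(test, send_data(42), 0);

	/* Stubs are removed at test exit; deactivating early also works. */
	kunit_deactivate_static_stub(test, send_data);
	KUNIT_EXPECT_EQ(test, send_data(42), -EIO);
}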
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 146d1b48a096..f3c6b11f12b8 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -70,6 +70,13 @@ module_param_named(enable, enable_param, bool, 0);
MODULE_PARM_DESC(enable, "Enable KUnit tests");
/*
+ * Configure the base timeout.
+ */
+static unsigned long kunit_base_timeout = CONFIG_KUNIT_DEFAULT_TIMEOUT;
+module_param_named(timeout, kunit_base_timeout, ulong, 0644);
+MODULE_PARM_DESC(timeout, "Set the base timeout for KUnit test cases");
+
+/*
* KUnit statistic mode:
* 0 - disabled
* 1 - only when there is more than one subtest
@@ -373,6 +380,40 @@ static void kunit_run_case_check_speed(struct kunit *test,
duration.tv_sec, duration.tv_nsec);
}
+/* Returns timeout multiplier based on speed.
+ * DEFAULT: 1
+ * KUNIT_SPEED_SLOW: 3
+ * KUNIT_SPEED_VERY_SLOW: 12
+ */
+static int kunit_timeout_mult(enum kunit_speed speed)
+{
+ switch (speed) {
+ case KUNIT_SPEED_SLOW:
+ return 3;
+ case KUNIT_SPEED_VERY_SLOW:
+ return 12;
+ default:
+ return 1;
+ }
+}
+
+static unsigned long kunit_test_timeout(struct kunit_suite *suite, struct kunit_case *test_case)
+{
+ int mult = 1;
+
+ /*
+ * The base test timeout (300 seconds by default) is adjusted by mult
+ * based on the test speed. The speed attribute of the innermost test
+ * component overrides that of its parents.
+ */
+ if (suite->attr.speed != KUNIT_SPEED_UNSET)
+ mult = kunit_timeout_mult(suite->attr.speed);
+ if (test_case->attr.speed != KUNIT_SPEED_UNSET)
+ mult = kunit_timeout_mult(test_case->attr.speed);
+ return mult * kunit_base_timeout * msecs_to_jiffies(MSEC_PER_SEC);
+}
+
+
/*
* Initializes and runs test case. Does not clean up or do post validations.
*/
@@ -527,7 +568,8 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
kunit_try_catch_init(try_catch,
test,
kunit_try_run_case,
- kunit_catch_run_case);
+ kunit_catch_run_case,
+ kunit_test_timeout(suite, test_case));
context.test = test;
context.suite = suite;
context.test_case = test_case;
@@ -537,7 +579,8 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
kunit_try_catch_init(try_catch,
test,
kunit_try_run_case_cleanup,
- kunit_catch_run_case_cleanup);
+ kunit_catch_run_case_cleanup,
+ kunit_test_timeout(suite, test_case));
kunit_try_catch_run(try_catch, &context);
/* Propagate the parameter result to the test case. */
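
Since the multiplier keys off the speed attribute, marking a case slow directly scales its budget; the base value itself is adjustable at boot via the module parameter registered above (kunit.timeout=<seconds> on the kernel command line). A minimal sketch, with hypothetical my_fast_test()/my_slow_test():

#include <kunit/test.h>

static void my_fast_test(struct kunit *test) { }
static void my_slow_test(struct kunit *test) { }

static struct kunit_case my_timeout_cases[] = {
	KUNIT_CASE(my_fast_test),	/* 1x the base timeout */
	KUNIT_CASE_SLOW(my_slow_test),	/* KUNIT_SPEED_SLOW: 3x */
	{}
};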
diff --git a/lib/kunit/try-catch-impl.h b/lib/kunit/try-catch-impl.h
index 203ba6a5e740..6f401b97cd0b 100644
--- a/lib/kunit/try-catch-impl.h
+++ b/lib/kunit/try-catch-impl.h
@@ -17,11 +17,13 @@ struct kunit;
static inline void kunit_try_catch_init(struct kunit_try_catch *try_catch,
struct kunit *test,
kunit_try_catch_func_t try,
- kunit_try_catch_func_t catch)
+ kunit_try_catch_func_t catch,
+ unsigned long timeout)
{
try_catch->test = test;
try_catch->try = try;
try_catch->catch = catch;
+ try_catch->timeout = timeout;
}
#endif /* _KUNIT_TRY_CATCH_IMPL_H */
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
index 6bbe0025b079..d84a879f0a78 100644
--- a/lib/kunit/try-catch.c
+++ b/lib/kunit/try-catch.c
@@ -34,31 +34,6 @@ static int kunit_generic_run_threadfn_adapter(void *data)
return 0;
}
-static unsigned long kunit_test_timeout(void)
-{
- /*
- * TODO(brendanhiggins@google.com): We should probably have some type of
- * variable timeout here. The only question is what that timeout value
- * should be.
- *
- * The intention has always been, at some point, to be able to label
- * tests with some type of size bucket (unit/small, integration/medium,
- * large/system/end-to-end, etc), where each size bucket would get a
- * default timeout value kind of like what Bazel does:
- * https://docs.bazel.build/versions/master/be/common-definitions.html#test.size
- * There is still some debate to be had on exactly how we do this. (For
- * one, we probably want to have some sort of test runner level
- * timeout.)
- *
- * For more background on this topic, see:
- * https://mike-bland.com/2011/11/01/small-medium-large.html
- *
- * If tests timeout due to exceeding sysctl_hung_task_timeout_secs,
- * the task will be killed and an oops generated.
- */
- return 300 * msecs_to_jiffies(MSEC_PER_SEC); /* 5 min */
-}
-
void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
{
struct kunit *test = try_catch->test;
@@ -85,8 +60,8 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
task_done = task_struct->vfork_done;
wake_up_process(task_struct);
- time_remaining = wait_for_completion_timeout(task_done,
- kunit_test_timeout());
+ time_remaining = wait_for_completion_timeout(
+ task_done, try_catch->timeout);
if (time_remaining == 0) {
try_catch->try_result = -ETIMEDOUT;
kthread_stop(task_struct);
diff --git a/lib/kunit/user_alloc.c b/lib/kunit/user_alloc.c
index 46951be018be..b8cac765e620 100644
--- a/lib/kunit/user_alloc.c
+++ b/lib/kunit/user_alloc.c
@@ -22,8 +22,7 @@ struct kunit_vm_mmap_params {
unsigned long offset;
};
-/* Create and attach a new mm if it doesn't already exist. */
-static int kunit_attach_mm(void)
+int kunit_attach_mm(void)
{
struct mm_struct *mm;
@@ -49,6 +48,7 @@ static int kunit_attach_mm(void)
return 0;
}
+EXPORT_SYMBOL_GPL(kunit_attach_mm);
static int kunit_vm_mmap_init(struct kunit_resource *res, void *context)
{
diff --git a/lib/raid6/recov_s390xc.c b/lib/raid6/recov_s390xc.c
index 179eec900cea..4a7aa466f0ef 100644
--- a/lib/raid6/recov_s390xc.c
+++ b/lib/raid6/recov_s390xc.c
@@ -6,7 +6,6 @@
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#include <linux/export.h>
#include <linux/raid/pq.h>
static inline void xor_block(u8 *p1, u8 *p2)
diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
index cf5609b1ca79..a9e6ffcff04b 100644
--- a/lib/ref_tracker.c
+++ b/lib/ref_tracker.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
+#include <linux/seq_file.h>
#define REF_TRACKER_STACK_ENTRIES 16
#define STACK_BUF_SIZE 1024
@@ -28,6 +29,45 @@ struct ref_tracker_dir_stats {
} stacks[];
};
+#ifdef CONFIG_DEBUG_FS
+#include <linux/xarray.h>
+
+/*
+ * ref_tracker_dir_init() is usually called in allocation-safe contexts, but
+ * the same is not true of ref_tracker_dir_exit() which can be called from
+ * anywhere an object is freed. Removing debugfs dentries is a blocking
+ * operation, so we defer that work to the debugfs_reap_worker.
+ *
+ * Each dentry is tracked in the appropriate xarray. When
+ * ref_tracker_dir_exit() is called, its entries in the xarrays are marked and
+ * the workqueue job is scheduled. The worker then runs and deletes any marked
+ * dentries asynchronously.
+ */
+static struct xarray debugfs_dentries;
+static struct xarray debugfs_symlinks;
+static struct work_struct debugfs_reap_worker;
+
+#define REF_TRACKER_DIR_DEAD XA_MARK_0
+static inline void ref_tracker_debugfs_mark(struct ref_tracker_dir *dir)
+{
+ unsigned long flags;
+
+ xa_lock_irqsave(&debugfs_dentries, flags);
+ __xa_set_mark(&debugfs_dentries, (unsigned long)dir, REF_TRACKER_DIR_DEAD);
+ xa_unlock_irqrestore(&debugfs_dentries, flags);
+
+ xa_lock_irqsave(&debugfs_symlinks, flags);
+ __xa_set_mark(&debugfs_symlinks, (unsigned long)dir, REF_TRACKER_DIR_DEAD);
+ xa_unlock_irqrestore(&debugfs_symlinks, flags);
+
+ schedule_work(&debugfs_reap_worker);
+}
+#else
+static inline void ref_tracker_debugfs_mark(struct ref_tracker_dir *dir)
+{
+}
+#endif
+
static struct ref_tracker_dir_stats *
ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
{
@@ -63,21 +103,39 @@ ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
}
struct ostream {
+ void __ostream_printf (*func)(struct ostream *stream, char *fmt, ...);
+ char *prefix;
char *buf;
+ struct seq_file *seq;
int size, used;
};
+static void __ostream_printf pr_ostream_log(struct ostream *stream, char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+}
+
+static void __ostream_printf pr_ostream_buf(struct ostream *stream, char *fmt, ...)
+{
+ int ret, len = stream->size - stream->used;
+ va_list args;
+
+ va_start(args, fmt);
+ ret = vsnprintf(stream->buf + stream->used, len, fmt, args);
+ va_end(args);
+ if (ret > 0)
+ stream->used += min(ret, len);
+}
+
#define pr_ostream(stream, fmt, args...) \
({ \
struct ostream *_s = (stream); \
\
- if (!_s->buf) { \
- pr_err(fmt, ##args); \
- } else { \
- int ret, len = _s->size - _s->used; \
- ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
- _s->used += min(ret, len); \
- } \
+ _s->func(_s, fmt, ##args); \
})
static void
@@ -96,8 +154,8 @@ __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
stats = ref_tracker_get_stats(dir, display_limit);
if (IS_ERR(stats)) {
- pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
- dir->name, dir, stats);
+ pr_ostream(s, "%s%s@%p: couldn't get stats, error %pe\n",
+ s->prefix, dir->class, dir, stats);
return;
}
@@ -107,14 +165,15 @@ __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
stack = stats->stacks[i].stack_handle;
if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
sbuf[0] = 0;
- pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
- stats->stacks[i].count, stats->total, sbuf);
+ pr_ostream(s, "%s%s@%p has %d/%d users at\n%s\n", s->prefix,
+ dir->class, dir, stats->stacks[i].count,
+ stats->total, sbuf);
skipped -= stats->stacks[i].count;
}
if (skipped)
- pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
- dir->name, dir, skipped, stats->total);
+ pr_ostream(s, "%s%s@%p skipped reports about %d/%d users.\n",
+ s->prefix, dir->class, dir, skipped, stats->total);
kfree(sbuf);
@@ -124,7 +183,8 @@ __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
unsigned int display_limit)
{
- struct ostream os = {};
+ struct ostream os = { .func = pr_ostream_log,
+ .prefix = "ref_tracker: " };
__ref_tracker_dir_pr_ostream(dir, display_limit, &os);
}
@@ -143,7 +203,10 @@ EXPORT_SYMBOL(ref_tracker_dir_print);
int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
{
- struct ostream os = { .buf = buf, .size = size };
+ struct ostream os = { .func = pr_ostream_buf,
+ .prefix = "ref_tracker: ",
+ .buf = buf,
+ .size = size };
unsigned long flags;
spin_lock_irqsave(&dir->lock, flags);
@@ -161,6 +224,11 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
bool leak = false;
dir->dead = true;
+ /*
+ * The xarray entries must be marked before the dir->lock is taken to
+ * protect simultaneous debugfs readers.
+ */
+ ref_tracker_debugfs_mark(dir);
spin_lock_irqsave(&dir->lock, flags);
list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
list_del(&tracker->head);
@@ -273,3 +341,194 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *ref_tracker_debug_dir = (struct dentry *)-ENOENT;
+
+static void __ostream_printf pr_ostream_seq(struct ostream *stream, char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ seq_vprintf(stream->seq, fmt, args);
+ va_end(args);
+}
+
+static int ref_tracker_dir_seq_print(struct ref_tracker_dir *dir, struct seq_file *seq)
+{
+ struct ostream os = { .func = pr_ostream_seq,
+ .prefix = "",
+ .seq = seq };
+
+ __ref_tracker_dir_pr_ostream(dir, 16, &os);
+
+ return os.used;
+}
+
+static int ref_tracker_debugfs_show(struct seq_file *f, void *v)
+{
+ struct ref_tracker_dir *dir = f->private;
+ unsigned long index = (unsigned long)dir;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * "dir" may not exist at this point if ref_tracker_dir_exit() has
+ * already been called. Take care not to dereference it until its
+ * legitimacy is established.
+ *
+ * The xa_lock is necessary to ensure that "dir" doesn't disappear
+ * before its lock can be taken. If it's in the hash and not marked
+ * dead, then it's safe to take dir->lock which prevents
+ * ref_tracker_dir_exit() from completing. Once the dir->lock is
+ * acquired, the xa_lock can be released. All of this must be IRQ-safe.
+ */
+ xa_lock_irqsave(&debugfs_dentries, flags);
+ if (!xa_load(&debugfs_dentries, index) ||
+ xa_get_mark(&debugfs_dentries, index, REF_TRACKER_DIR_DEAD)) {
+ xa_unlock_irqrestore(&debugfs_dentries, flags);
+ return -ENODATA;
+ }
+
+ spin_lock(&dir->lock);
+ xa_unlock(&debugfs_dentries);
+ ret = ref_tracker_dir_seq_print(dir, f);
+ spin_unlock_irqrestore(&dir->lock, flags);
+ return ret;
+}
+
+static int ref_tracker_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct ref_tracker_dir *dir = inode->i_private;
+
+ return single_open(filp, ref_tracker_debugfs_show, dir);
+}
+
+static const struct file_operations ref_tracker_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = ref_tracker_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * ref_tracker_dir_debugfs - create debugfs file for ref_tracker_dir
+ * @dir: ref_tracker_dir to be associated with debugfs file
+ *
+ * In most cases, a debugfs file will be created automatically for every
+ * ref_tracker_dir. If the object was created before debugfs is brought up
+ * then that may fail. In those cases, it is safe to call this at a later
+ * time to create the file.
+ */
+void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+ char name[NAME_MAX + 1];
+ struct dentry *dentry;
+ int ret;
+
+ /* No-op if already created */
+ dentry = xa_load(&debugfs_dentries, (unsigned long)dir);
+ if (dentry && !xa_is_err(dentry))
+ return;
+
+ ret = snprintf(name, sizeof(name), "%s@%px", dir->class, dir);
+ name[sizeof(name) - 1] = '\0';
+
+ if (ret < sizeof(name)) {
+ dentry = debugfs_create_file(name, S_IFREG | 0400,
+ ref_tracker_debug_dir, dir,
+ &ref_tracker_debugfs_fops);
+ if (!IS_ERR(dentry)) {
+ void *old;
+
+ old = xa_store_irq(&debugfs_dentries, (unsigned long)dir,
+ dentry, GFP_KERNEL);
+
+ if (xa_is_err(old))
+ debugfs_remove(dentry);
+ else
+ WARN_ON_ONCE(old);
+ }
+ }
+}
+EXPORT_SYMBOL(ref_tracker_dir_debugfs);
+
+void __ostream_printf ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
+{
+ char name[NAME_MAX + 1];
+ struct dentry *symlink, *dentry;
+ va_list args;
+ int ret;
+
+ symlink = xa_load(&debugfs_symlinks, (unsigned long)dir);
+ dentry = xa_load(&debugfs_dentries, (unsigned long)dir);
+
+ /* Already created? */
+ if (symlink && !xa_is_err(symlink))
+ return;
+
+ if (!dentry || xa_is_err(dentry))
+ return;
+
+ va_start(args, fmt);
+ ret = vsnprintf(name, sizeof(name), fmt, args);
+ va_end(args);
+ name[sizeof(name) - 1] = '\0';
+
+ if (ret < sizeof(name)) {
+ symlink = debugfs_create_symlink(name, ref_tracker_debug_dir,
+ dentry->d_name.name);
+ if (!IS_ERR(symlink)) {
+ void *old;
+
+ old = xa_store_irq(&debugfs_symlinks, (unsigned long)dir,
+ symlink, GFP_KERNEL);
+ if (xa_is_err(old))
+ debugfs_remove(symlink);
+ else
+ WARN_ON_ONCE(old);
+ }
+ }
+}
+EXPORT_SYMBOL(ref_tracker_dir_symlink);
+
+static void debugfs_reap_work(struct work_struct *work)
+{
+ struct dentry *dentry;
+ unsigned long index;
+ bool reaped;
+
+ do {
+ reaped = false;
+ xa_for_each_marked(&debugfs_symlinks, index, dentry, REF_TRACKER_DIR_DEAD) {
+ xa_erase_irq(&debugfs_symlinks, index);
+ debugfs_remove(dentry);
+ reaped = true;
+ }
+ xa_for_each_marked(&debugfs_dentries, index, dentry, REF_TRACKER_DIR_DEAD) {
+ xa_erase_irq(&debugfs_dentries, index);
+ debugfs_remove(dentry);
+ reaped = true;
+ }
+ } while (reaped);
+}
+
+static int __init ref_tracker_debugfs_postcore_init(void)
+{
+ INIT_WORK(&debugfs_reap_worker, debugfs_reap_work);
+ xa_init_flags(&debugfs_dentries, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&debugfs_symlinks, XA_FLAGS_LOCK_IRQ);
+ return 0;
+}
+postcore_initcall(ref_tracker_debugfs_postcore_init);
+
+static int __init ref_tracker_debugfs_late_init(void)
+{
+ ref_tracker_debug_dir = debugfs_create_dir("ref_tracker", NULL);
+ return 0;
+}
+late_initcall(ref_tracker_debugfs_late_init);
+#endif /* CONFIG_DEBUG_FS */
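
The mark-then-reap scheme above generalizes to any case where object teardown may happen in atomic context but the cleanup itself must be allowed to block. A minimal standalone sketch of the pattern (the my_* names are hypothetical, not part of the ref_tracker API):

#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MY_DEAD XA_MARK_0

static DEFINE_XARRAY_FLAGS(my_entries, XA_FLAGS_LOCK_IRQ);

/* Worker: runs in process context, so blocking cleanup is allowed here. */
static void my_reap(struct work_struct *work)
{
	unsigned long index;
	void *entry;

	xa_for_each_marked(&my_entries, index, entry, MY_DEAD) {
		xa_erase_irq(&my_entries, index);
		/* blocking teardown of 'entry' (e.g. debugfs_remove()) */
	}
}
static DECLARE_WORK(my_reap_work, my_reap);

/* Callable from any context: mark the entry dead, defer the real work. */
static void my_mark_dead(unsigned long key)
{
	unsigned long flags;

	xa_lock_irqsave(&my_entries, flags);
	__xa_set_mark(&my_entries, key, MY_DEAD);
	xa_unlock_irqrestore(&my_entries, flags);
	schedule_work(&my_reap_work);
}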
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index a2bb7738c373..94b3f6b19538 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,10 +22,8 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
if (is_percpu_thread())
goto out;
-#ifdef CONFIG_SMP
if (current->migration_disabled)
goto out;
-#endif
/*
* It is valid to assume CPU-locality during early bootup:
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index 222b39fc2629..ce5c4c36a084 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -908,50 +908,22 @@ static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
return err;
}
-static int test_hints_case(const struct hints_case *hints_case)
+static int test_hints_case2(const struct hints_case *hints_case,
+ struct objagg_hints *hints, struct objagg *objagg)
{
struct objagg_obj *objagg_obj;
- struct objagg_hints *hints;
struct world world2 = {};
- struct world world = {};
struct objagg *objagg2;
- struct objagg *objagg;
const char *errmsg;
int i;
int err;
- objagg = objagg_create(&delta_ops, NULL, &world);
- if (IS_ERR(objagg))
- return PTR_ERR(objagg);
-
- for (i = 0; i < hints_case->key_ids_count; i++) {
- objagg_obj = world_obj_get(&world, objagg,
- hints_case->key_ids[i]);
- if (IS_ERR(objagg_obj)) {
- err = PTR_ERR(objagg_obj);
- goto err_world_obj_get;
- }
- }
-
- pr_debug_stats(objagg);
- err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg);
- if (err) {
- pr_err("Stats: %s\n", errmsg);
- goto err_check_expect_stats;
- }
-
- hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
- if (IS_ERR(hints)) {
- err = PTR_ERR(hints);
- goto err_hints_get;
- }
-
pr_debug_hints_stats(hints);
err = check_expect_hints_stats(hints, &hints_case->expect_stats_hints,
&errmsg);
if (err) {
pr_err("Hints stats: %s\n", errmsg);
- goto err_check_expect_hints_stats;
+ return err;
}
objagg2 = objagg_create(&delta_ops, hints, &world2);
@@ -983,7 +955,48 @@ err_world2_obj_get:
world_obj_put(&world2, objagg, hints_case->key_ids[i]);
i = hints_case->key_ids_count;
objagg_destroy(objagg2);
-err_check_expect_hints_stats:
+
+ return err;
+}
+
+static int test_hints_case(const struct hints_case *hints_case)
+{
+ struct objagg_obj *objagg_obj;
+ struct objagg_hints *hints;
+ struct world world = {};
+ struct objagg *objagg;
+ const char *errmsg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&delta_ops, NULL, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ for (i = 0; i < hints_case->key_ids_count; i++) {
+ objagg_obj = world_obj_get(&world, objagg,
+ hints_case->key_ids[i]);
+ if (IS_ERR(objagg_obj)) {
+ err = PTR_ERR(objagg_obj);
+ goto err_world_obj_get;
+ }
+ }
+
+ pr_debug_stats(objagg);
+ err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg);
+ if (err) {
+ pr_err("Stats: %s\n", errmsg);
+ goto err_check_expect_stats;
+ }
+
+ hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
+ if (IS_ERR(hints)) {
+ err = PTR_ERR(hints);
+ goto err_hints_get;
+ }
+
+ err = test_hints_case2(hints_case, hints, objagg);
+
objagg_hints_put(hints);
err_hints_get:
err_check_expect_stats:
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index 83434b722193..fa6d728a8b5b 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -46,5 +46,6 @@ obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
+obj-$(CONFIG_RATELIMIT_KUNIT_TEST) += test_ratelimit.o
obj-$(CONFIG_TEST_RUNTIME_MODULE) += module/
diff --git a/lib/tests/longest_symbol_kunit.c b/lib/tests/longest_symbol_kunit.c
index e3c28ff1807f..9b4de3050ba7 100644
--- a/lib/tests/longest_symbol_kunit.c
+++ b/lib/tests/longest_symbol_kunit.c
@@ -3,8 +3,7 @@
* Test the longest symbol length. Execute with:
* ./tools/testing/kunit/kunit.py run longest-symbol
* --arch=x86_64 --kconfig_add CONFIG_KPROBES=y --kconfig_add CONFIG_MODULES=y
- * --kconfig_add CONFIG_RETPOLINE=n --kconfig_add CONFIG_CFI_CLANG=n
- * --kconfig_add CONFIG_MITIGATION_RETPOLINE=n
+ * --kconfig_add CONFIG_CPU_MITIGATIONS=n --kconfig_add CONFIG_GCOV_KERNEL=n
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/lib/tests/test_ratelimit.c b/lib/tests/test_ratelimit.c
new file mode 100644
index 000000000000..bfaeca49304a
--- /dev/null
+++ b/lib/tests/test_ratelimit.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+
+#include <linux/ratelimit.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/cpumask.h>
+
+/* a simple boot-time regression test */
+
+#define TESTRL_INTERVAL (5 * HZ)
+static DEFINE_RATELIMIT_STATE(testrl, TESTRL_INTERVAL, 3);
+
+#define test_ratelimited(test, expected) \
+ KUNIT_ASSERT_EQ(test, ___ratelimit(&testrl, "test_ratelimit_smoke"), (expected))
+
+static void test_ratelimit_smoke(struct kunit *test)
+{
+ // Check settings.
+ KUNIT_ASSERT_GE(test, TESTRL_INTERVAL, 100);
+
+ // Test normal operation.
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, false);
+
+ schedule_timeout_idle(TESTRL_INTERVAL / 2);
+ test_ratelimited(test, false);
+
+ schedule_timeout_idle(TESTRL_INTERVAL * 3 / 4);
+ test_ratelimited(test, true);
+
+ schedule_timeout_idle(2 * TESTRL_INTERVAL);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+
+ schedule_timeout_idle(TESTRL_INTERVAL / 2);
+ test_ratelimited(test, true);
+ schedule_timeout_idle(TESTRL_INTERVAL * 3 / 4);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, false);
+
+ // Test disabling.
+ testrl.burst = 0;
+ test_ratelimited(test, false);
+ testrl.burst = 2;
+ testrl.interval = 0;
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+
+ // Testing re-enabling.
+ testrl.interval = TESTRL_INTERVAL;
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, false);
+ test_ratelimited(test, false);
+}
+
+static struct ratelimit_state stressrl = RATELIMIT_STATE_INIT_FLAGS("stressrl", HZ / 10, 3,
+ RATELIMIT_MSG_ON_RELEASE);
+
+static int doneflag;
+static const int stress_duration = 2 * HZ;
+
+struct stress_kthread {
+ unsigned long nattempts;
+ unsigned long nunlimited;
+ unsigned long nlimited;
+ unsigned long nmissed;
+ struct task_struct *tp;
+};
+
+static int test_ratelimit_stress_child(void *arg)
+{
+ struct stress_kthread *sktp = arg;
+
+ set_user_nice(current, MAX_NICE);
+ WARN_ON_ONCE(!sktp->tp);
+
+ while (!READ_ONCE(doneflag)) {
+ sktp->nattempts++;
+ if (___ratelimit(&stressrl, __func__))
+ sktp->nunlimited++;
+ else
+ sktp->nlimited++;
+ cond_resched();
+ }
+
+ sktp->nmissed = ratelimit_state_reset_miss(&stressrl);
+ return 0;
+}
+
+static void test_ratelimit_stress(struct kunit *test)
+{
+ int i;
+ const int n_stress_kthread = cpumask_weight(cpu_online_mask);
+ struct stress_kthread skt = { 0 };
+ struct stress_kthread *sktp = kcalloc(n_stress_kthread, sizeof(*sktp), GFP_KERNEL);
+
+ KUNIT_EXPECT_NOT_NULL_MSG(test, sktp, "Memory allocation failure");
+ for (i = 0; i < n_stress_kthread; i++) {
+ sktp[i].tp = kthread_run(test_ratelimit_stress_child, &sktp[i], "%s/%i",
+ "test_ratelimit_stress_child", i);
+ KUNIT_EXPECT_NOT_NULL_MSG(test, sktp, "kthread creation failure");
+ pr_alert("Spawned test_ratelimit_stress_child %d\n", i);
+ }
+ schedule_timeout_idle(stress_duration);
+ WRITE_ONCE(doneflag, 1);
+ for (i = 0; i < n_stress_kthread; i++) {
+ kthread_stop(sktp[i].tp);
+ skt.nattempts += sktp[i].nattempts;
+ skt.nunlimited += sktp[i].nunlimited;
+ skt.nlimited += sktp[i].nlimited;
+ skt.nmissed += sktp[i].nmissed;
+ }
+ KUNIT_ASSERT_EQ_MSG(test, skt.nunlimited + skt.nlimited, skt.nattempts,
+ "Outcomes not equal to attempts");
+ KUNIT_ASSERT_EQ_MSG(test, skt.nlimited, skt.nmissed, "Misses not equal to limits");
+}
+
+static struct kunit_case ratelimit_test_cases[] = {
+ KUNIT_CASE_SLOW(test_ratelimit_smoke),
+ KUNIT_CASE_SLOW(test_ratelimit_stress),
+ {}
+};
+
+static struct kunit_suite ratelimit_test_suite = {
+ .name = "lib_ratelimit",
+ .test_cases = ratelimit_test_cases,
+};
+
+kunit_test_suites(&ratelimit_test_suite);
+
+MODULE_DESCRIPTION("___ratelimit() KUnit test suite");
+MODULE_LICENSE("GPL");
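
The suite drives ___ratelimit() directly so it can pass a fixed identifier; ordinary kernel code goes through the __ratelimit() wrapper, which supplies __func__. A minimal sketch of typical consumer usage, with a hypothetical my_handle_error():

#include <linux/ratelimit.h>
#include <linux/printk.h>

/* Allow at most 3 messages per 5-second interval; excess calls return false. */
static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 3);

static void my_handle_error(int err)
{
	if (__ratelimit(&my_rs))
		pr_warn("device error %d (further reports rate-limited)\n", err);
}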
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 93ef801a97ef..02ea19f67164 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -2,6 +2,7 @@
/*
* Generic userspace implementations of gettimeofday() and similar.
*/
+#include <vdso/auxclock.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
@@ -71,6 +72,42 @@ static inline bool vdso_cycles_ok(u64 cycles)
}
#endif
+static __always_inline bool vdso_clockid_valid(clockid_t clock)
+{
+ /* Check for negative values or invalid clocks */
+ return likely((u32) clock <= CLOCK_AUX_LAST);
+}
+
+/*
+ * Must not be invoked within the sequence read section as a race inside
+ * that loop could result in __iter_div_u64_rem() being extremely slow.
+ */
+static __always_inline void vdso_set_timespec(struct __kernel_timespec *ts, u64 sec, u64 ns)
+{
+ ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+}
+
+static __always_inline
+bool vdso_get_timestamp(const struct vdso_time_data *vd, const struct vdso_clock *vc,
+ unsigned int clkidx, u64 *sec, u64 *ns)
+{
+ const struct vdso_timestamp *vdso_ts = &vc->basetime[clkidx];
+ u64 cycles;
+
+ if (unlikely(!vdso_clocksource_ok(vc)))
+ return false;
+
+ cycles = __arch_get_hw_counter(vc->clock_mode, vd);
+ if (unlikely(!vdso_cycles_ok(cycles)))
+ return false;
+
+ *ns = vdso_calc_ns(vc, cycles, vdso_ts->nsec);
+ *sec = vdso_ts->sec;
+
+ return true;
+}
+
#ifdef CONFIG_TIME_NS
#ifdef CONFIG_GENERIC_VDSO_DATA_STORE
@@ -82,48 +119,35 @@ const struct vdso_time_data *__arch_get_vdso_u_timens_data(const struct vdso_tim
#endif /* CONFIG_GENERIC_VDSO_DATA_STORE */
static __always_inline
-int do_hres_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
- clockid_t clk, struct __kernel_timespec *ts)
+bool do_hres_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
const struct timens_offset *offs = &vcns->offset[clk];
const struct vdso_clock *vc = vd->clock_data;
- const struct vdso_timestamp *vdso_ts;
- u64 cycles, ns;
u32 seq;
s64 sec;
+ u64 ns;
if (clk != CLOCK_MONOTONIC_RAW)
vc = &vc[CS_HRES_COARSE];
else
vc = &vc[CS_RAW];
- vdso_ts = &vc->basetime[clk];
do {
seq = vdso_read_begin(vc);
- if (unlikely(!vdso_clocksource_ok(vc)))
- return -1;
-
- cycles = __arch_get_hw_counter(vc->clock_mode, vd);
- if (unlikely(!vdso_cycles_ok(cycles)))
- return -1;
- ns = vdso_calc_ns(vc, cycles, vdso_ts->nsec);
- sec = vdso_ts->sec;
+ if (!vdso_get_timestamp(vd, vc, clk, &sec, &ns))
+ return false;
} while (unlikely(vdso_read_retry(vc, seq)));
/* Add the namespace offset */
sec += offs->sec;
ns += offs->nsec;
- /*
- * Do this outside the loop: a race inside the loop could result
- * in __iter_div_u64_rem() being extremely slow.
- */
- ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
- ts->tv_nsec = ns;
+ vdso_set_timespec(ts, sec, ns);
- return 0;
+ return true;
}
#else
static __always_inline
@@ -133,24 +157,23 @@ const struct vdso_time_data *__arch_get_vdso_u_timens_data(const struct vdso_tim
}
static __always_inline
-int do_hres_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
- clockid_t clk, struct __kernel_timespec *ts)
+bool do_hres_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- return -EINVAL;
+ return false;
}
#endif
static __always_inline
-int do_hres(const struct vdso_time_data *vd, const struct vdso_clock *vc,
- clockid_t clk, struct __kernel_timespec *ts)
+bool do_hres(const struct vdso_time_data *vd, const struct vdso_clock *vc,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- const struct vdso_timestamp *vdso_ts = &vc->basetime[clk];
- u64 cycles, sec, ns;
+ u64 sec, ns;
u32 seq;
/* Allows to compile the high resolution parts out */
if (!__arch_vdso_hres_capable())
- return -1;
+ return false;
do {
/*
@@ -172,30 +195,19 @@ int do_hres(const struct vdso_time_data *vd, const struct vdso_clock *vc,
}
smp_rmb();
- if (unlikely(!vdso_clocksource_ok(vc)))
- return -1;
-
- cycles = __arch_get_hw_counter(vc->clock_mode, vd);
- if (unlikely(!vdso_cycles_ok(cycles)))
- return -1;
- ns = vdso_calc_ns(vc, cycles, vdso_ts->nsec);
- sec = vdso_ts->sec;
+ if (!vdso_get_timestamp(vd, vc, clk, &sec, &ns))
+ return false;
} while (unlikely(vdso_read_retry(vc, seq)));
- /*
- * Do this outside the loop: a race inside the loop could result
- * in __iter_div_u64_rem() being extremely slow.
- */
- ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
- ts->tv_nsec = ns;
+ vdso_set_timespec(ts, sec, ns);
- return 0;
+ return true;
}
#ifdef CONFIG_TIME_NS
static __always_inline
-int do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
- clockid_t clk, struct __kernel_timespec *ts)
+bool do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
const struct timens_offset *offs = &vcns->offset[clk];
@@ -217,26 +229,22 @@ int do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock
sec += offs->sec;
nsec += offs->nsec;
- /*
- * Do this outside the loop: a race inside the loop could result
- * in __iter_div_u64_rem() being extremely slow.
- */
- ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
- ts->tv_nsec = nsec;
- return 0;
+ vdso_set_timespec(ts, sec, nsec);
+
+ return true;
}
#else
static __always_inline
-int do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
- clockid_t clk, struct __kernel_timespec *ts)
+bool do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- return -1;
+ return false;
}
#endif
static __always_inline
-int do_coarse(const struct vdso_time_data *vd, const struct vdso_clock *vc,
- clockid_t clk, struct __kernel_timespec *ts)
+bool do_coarse(const struct vdso_time_data *vd, const struct vdso_clock *vc,
+ clockid_t clk, struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vc->basetime[clk];
u32 seq;
@@ -258,19 +266,60 @@ int do_coarse(const struct vdso_time_data *vd, const struct vdso_clock *vc,
ts->tv_nsec = vdso_ts->nsec;
} while (unlikely(vdso_read_retry(vc, seq)));
- return 0;
+ return true;
+}
+
+static __always_inline
+bool do_aux(const struct vdso_time_data *vd, clockid_t clock, struct __kernel_timespec *ts)
+{
+ const struct vdso_clock *vc;
+ u32 seq, idx;
+ u64 sec, ns;
+
+ if (!IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS))
+ return false;
+
+ idx = clock - CLOCK_AUX;
+ vc = &vd->aux_clock_data[idx];
+
+ do {
+ /*
+ * Open coded function vdso_read_begin() to handle
+ * VDSO_CLOCK_TIMENS. See comment in do_hres().
+ */
+ while ((seq = READ_ONCE(vc->seq)) & 1) {
+ if (IS_ENABLED(CONFIG_TIME_NS) && vc->clock_mode == VDSO_CLOCKMODE_TIMENS) {
+ vd = __arch_get_vdso_u_timens_data(vd);
+ vc = &vd->aux_clock_data[idx];
+ /* Re-read from the real time data page */
+ continue;
+ }
+ cpu_relax();
+ }
+ smp_rmb();
+
+ /* Auxclock disabled? */
+ if (vc->clock_mode == VDSO_CLOCKMODE_NONE)
+ return false;
+
+ if (!vdso_get_timestamp(vd, vc, VDSO_BASE_AUX, &sec, &ns))
+ return false;
+ } while (unlikely(vdso_read_retry(vc, seq)));
+
+ vdso_set_timespec(ts, sec, ns);
+
+ return true;
}
-static __always_inline int
+static __always_inline bool
__cvdso_clock_gettime_common(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
const struct vdso_clock *vc = vd->clock_data;
u32 msk;
- /* Check for negative values or invalid clocks */
- if (unlikely((u32) clock >= MAX_CLOCKS))
- return -1;
+ if (!vdso_clockid_valid(clock))
+ return false;
/*
* Convert the clockid to a bitmask and use it to check which
@@ -283,8 +332,10 @@ __cvdso_clock_gettime_common(const struct vdso_time_data *vd, clockid_t clock,
return do_coarse(vd, &vc[CS_HRES_COARSE], clock, ts);
else if (msk & VDSO_RAW)
vc = &vc[CS_RAW];
+ else if (msk & VDSO_AUX)
+ return do_aux(vd, clock, ts);
else
- return -1;
+ return false;
return do_hres(vd, vc, clock, ts);
}
@@ -293,9 +344,11 @@ static __maybe_unused int
__cvdso_clock_gettime_data(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
- int ret = __cvdso_clock_gettime_common(vd, clock, ts);
+ bool ok;
+
+ ok = __cvdso_clock_gettime_common(vd, clock, ts);
- if (unlikely(ret))
+ if (unlikely(!ok))
return clock_gettime_fallback(clock, ts);
return 0;
}
@@ -312,18 +365,18 @@ __cvdso_clock_gettime32_data(const struct vdso_time_data *vd, clockid_t clock,
struct old_timespec32 *res)
{
struct __kernel_timespec ts;
- int ret;
+ bool ok;
- ret = __cvdso_clock_gettime_common(vd, clock, &ts);
+ ok = __cvdso_clock_gettime_common(vd, clock, &ts);
- if (unlikely(ret))
+ if (unlikely(!ok))
return clock_gettime32_fallback(clock, res);
- /* For ret == 0 */
+ /* For ok == true */
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
- return ret;
+ return 0;
}
static __maybe_unused int
@@ -342,7 +395,7 @@ __cvdso_gettimeofday_data(const struct vdso_time_data *vd,
if (likely(tv != NULL)) {
struct __kernel_timespec ts;
- if (do_hres(vd, &vc[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
+ if (!do_hres(vd, &vc[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
return gettimeofday_fallback(tv, tz);
tv->tv_sec = ts.tv_sec;
@@ -396,16 +449,15 @@ static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
-int __cvdso_clock_getres_common(const struct vdso_time_data *vd, clockid_t clock,
- struct __kernel_timespec *res)
+bool __cvdso_clock_getres_common(const struct vdso_time_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
{
const struct vdso_clock *vc = vd->clock_data;
u32 msk;
u64 ns;
- /* Check for negative values or invalid clocks */
- if (unlikely((u32) clock >= MAX_CLOCKS))
- return -1;
+ if (!vdso_clockid_valid(clock))
+ return false;
if (IS_ENABLED(CONFIG_TIME_NS) &&
vc->clock_mode == VDSO_CLOCKMODE_TIMENS)
@@ -426,24 +478,28 @@ int __cvdso_clock_getres_common(const struct vdso_time_data *vd, clockid_t clock
* Preserves the behaviour of posix_get_coarse_res().
*/
ns = LOW_RES_NSEC;
+ } else if (msk & VDSO_AUX) {
+ ns = aux_clock_resolution_ns();
} else {
- return -1;
+ return false;
}
if (likely(res)) {
res->tv_sec = 0;
res->tv_nsec = ns;
}
- return 0;
+ return true;
}
static __maybe_unused
int __cvdso_clock_getres_data(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *res)
{
- int ret = __cvdso_clock_getres_common(vd, clock, res);
+ bool ok;
- if (unlikely(ret))
+ ok = __cvdso_clock_getres_common(vd, clock, res);
+
+ if (unlikely(!ok))
return clock_getres_fallback(clock, res);
return 0;
}
@@ -460,18 +516,18 @@ __cvdso_clock_getres_time32_data(const struct vdso_time_data *vd, clockid_t cloc
struct old_timespec32 *res)
{
struct __kernel_timespec ts;
- int ret;
+ bool ok;
- ret = __cvdso_clock_getres_common(vd, clock, &ts);
+ ok = __cvdso_clock_getres_common(vd, clock, &ts);
- if (unlikely(ret))
+ if (unlikely(!ok))
return clock_getres32_fallback(clock, res);
if (likely(res)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
- return ret;
+ return 0;
}
static __maybe_unused int