Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig               4
-rw-r--r--   lib/Kconfig.debug        14
-rw-r--r--   lib/crypto/utils.c        2
-rw-r--r--   lib/debugobjects.c      125
-rw-r--r--   lib/fault-inject.c      191
-rw-r--r--   lib/iov_iter.c          124
-rw-r--r--   lib/kunit/debugfs.c      14
-rw-r--r--   lib/kunit/kunit-test.c   77
-rw-r--r--   lib/kunit/test.c         57
-rw-r--r--   lib/libcrc32c.c           6
-rw-r--r--   lib/list-test.c         300
-rw-r--r--   lib/test_vmalloc.c        2
-rw-r--r--   lib/vdso/Makefile        13
-rw-r--r--   lib/vsprintf.c            2
14 files changed, 785 insertions(+), 146 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index ce2abffb9ed8..5c2da561c516 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -92,6 +92,7 @@ config ARCH_USE_SYM_ANNOTATIONS
config INDIRECT_PIO
bool "Access I/O in non-MMIO mode"
depends on ARM64
+ depends on HAS_IOPORT
help
On some platforms where no separate I/O space exists, there are I/O
hosts which can not be accessed in MMIO mode. Using the logical PIO
@@ -509,6 +510,9 @@ config HAS_IOMEM
depends on !NO_IOMEM
default y
+config HAS_IOPORT
+ bool
+
config HAS_IOPORT_MAP
bool
depends on HAS_IOMEM && !NO_IOPORT_MAP
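Note: the pattern this new symbol enables is that code touching legacy port I/O declares a dependency on it, so it is only offered on platforms that actually provide port I/O. A minimal sketch, with a hypothetical driver symbol:

config MY_ISA_DRIVER
	tristate "Example legacy port I/O driver (hypothetical)"
	depends on HAS_IOPORT
	help
	  Illustration only: code calling inb()/outb() depends on
	  HAS_IOPORT so it is not built where port I/O does not exist.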
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 39d1d93164bd..786ba9fd8555 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1958,9 +1958,21 @@ config FAIL_SUNRPC
Provide fault-injection capability for SunRPC and
its consumers.
+config FAULT_INJECTION_CONFIGFS
+ bool "Configfs interface for fault-injection capabilities"
+ depends on FAULT_INJECTION
+ select CONFIGFS_FS
+ help
+ This option allows configfs-based drivers to dynamically configure
+ fault-injection via configfs. Each parameter for driver-specific
+ fault-injection can be made visible as a configfs attribute in a
+ configfs group.
+
+
config FAULT_INJECTION_STACKTRACE_FILTER
bool "stacktrace filter for fault-injection capabilities"
- depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
+ depends on FAULT_INJECTION
+ depends on (FAULT_INJECTION_DEBUG_FS || FAULT_INJECTION_CONFIGFS) && STACKTRACE_SUPPORT
select STACKTRACE
depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
help
diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c
index 53230ab1b195..c852c7151b0a 100644
--- a/lib/crypto/utils.c
+++ b/lib/crypto/utils.c
@@ -6,7 +6,7 @@
*/
#include <asm/unaligned.h>
-#include <crypto/algapi.h>
+#include <crypto/utils.h>
#include <linux/module.h>
/*
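Note: this follows the split of the shared helpers (crypto_memneq(), crypto_xor(), ...) out of the heavyweight <crypto/algapi.h> into the lighter <crypto/utils.h>. A minimal sketch of a caller that only needs the constant-time comparison (the helper name is hypothetical):

#include <crypto/utils.h>
#include <linux/types.h>

/* Returns true when two MACs match, compared in constant time via
 * crypto_memneq() from <crypto/utils.h>. */
static bool mac_matches(const u8 *a, const u8 *b, size_t len)
{
	return crypto_memneq(a, b, len) == 0;
}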
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index df86e649d8be..b796799fadb2 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -216,10 +216,6 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
return obj;
}
-/*
- * Allocate a new object. If the pool is empty, switch off the debugger.
- * Must be called with interrupts disabled.
- */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
@@ -552,11 +548,49 @@ static void debug_object_is_on_stack(void *addr, int onstack)
WARN_ON(1);
}
+static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
+ const struct debug_obj_descr *descr,
+ bool onstack, bool alloc_ifstatic)
+{
+ struct debug_obj *obj = lookup_object(addr, b);
+ enum debug_obj_state state = ODEBUG_STATE_NONE;
+
+ if (likely(obj))
+ return obj;
+
+ /*
+ * debug_object_init() unconditionally allocates untracked
+ * objects. It does not matter whether it is a static object or
+ * not.
+ *
+ * debug_object_assert_init() and debug_object_activate() allow
+ * allocation only if the descriptor callback confirms that the
+ * object is static and considered initialized. For non-static
+ * objects the allocation needs to be done from the fixup callback.
+ */
+ if (unlikely(alloc_ifstatic)) {
+ if (!descr->is_static_object || !descr->is_static_object(addr))
+ return ERR_PTR(-ENOENT);
+ /* Statically allocated objects are considered initialized */
+ state = ODEBUG_STATE_INIT;
+ }
+
+ obj = alloc_object(addr, b, descr);
+ if (likely(obj)) {
+ obj->state = state;
+ debug_object_is_on_stack(addr, onstack);
+ return obj;
+ }
+
+ /* Out of memory. Do the cleanup outside of the locked region */
+ debug_objects_enabled = 0;
+ return NULL;
+}
+
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
enum debug_obj_state state;
- bool check_stack = false;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@@ -572,16 +606,11 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object(addr, db);
- if (!obj) {
- obj = alloc_object(addr, db, descr);
- if (!obj) {
- debug_objects_enabled = 0;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_objects_oom();
- return;
- }
- check_stack = true;
+ obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return;
}
switch (obj->state) {
@@ -607,8 +636,6 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (check_stack)
- debug_object_is_on_stack(addr, onstack);
}
/**
@@ -648,14 +675,12 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
*/
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
int ret;
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
if (!debug_objects_enabled)
return 0;
@@ -664,8 +689,8 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object(addr, db);
- if (obj) {
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ if (likely(!IS_ERR_OR_NULL(obj))) {
bool print_object = false;
switch (obj->state) {
@@ -698,24 +723,16 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * We are here when a static object is activated. We
- * let the type specific code confirm whether this is
- * true or not. if true, we just make sure that the
- * static object is tracked in the object tracker. If
- * not, this must be a bug, so we try to fix it up.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* track this static object */
- debug_object_init(addr, descr);
- debug_object_activate(addr, descr);
- } else {
- debug_print_object(&o, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- return ret ? 0 : -EINVAL;
+ /* If NULL the allocation has hit OOM */
+ if (!obj) {
+ debug_objects_oom();
+ return 0;
}
- return 0;
+
+ /* Object is neither static nor tracked. It's not initialized */
+ debug_print_object(&o, "activate");
+ ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
+ return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
@@ -869,6 +886,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
*/
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@@ -879,31 +897,20 @@ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (likely(!IS_ERR_OR_NULL(obj)))
+ return;
- obj = lookup_object(addr, db);
+ /* If NULL the allocation has hit OOM */
if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * Maybe the object is static, and we let the type specific
- * code confirm. Track this static object if true, else invoke
- * fixup.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* Track this static object */
- debug_object_init(addr, descr);
- } else {
- debug_print_object(&o, "assert_init");
- debug_object_fixup(descr->fixup_assert_init, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- }
+ debug_objects_oom();
return;
}
- raw_spin_unlock_irqrestore(&db->lock, flags);
+ /* Object is neither tracked nor static. It's not initialized. */
+ debug_print_object(&o, "assert_init");
+ debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
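Note: the alloc_ifstatic path relies on the descriptor's is_static_object() callback. A sketch of a hypothetical user (struct, flag, and names are illustrative, not from this patch):

#include <linux/debugobjects.h>

struct my_obj {
	unsigned int flags;		/* hypothetical */
};

#define MY_OBJ_STATIC	0x1		/* hypothetical flag */

static bool my_obj_is_static(void *addr)
{
	struct my_obj *obj = addr;

	return obj->flags & MY_OBJ_STATIC;
}

static const struct debug_obj_descr my_obj_debug_descr = {
	.name             = "my_obj",
	.is_static_object = my_obj_is_static,
};

/*
 * With this descriptor, debug_object_activate() on an untracked address
 * allocates a tracking object (state ODEBUG_STATE_INIT) only when
 * my_obj_is_static() confirms a static object; otherwise the object is
 * reported as uninitialized and the fixup_activate callback runs.
 */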
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 6cff320c4eb4..d608f9b48c10 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -244,3 +244,194 @@ struct dentry *fault_create_debugfs_attr(const char *name,
EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#ifdef CONFIG_FAULT_INJECTION_CONFIGFS
+
+/* These configfs attribute utilities are copied from drivers/block/null_blk/main.c */
+
+static ssize_t fault_uint_attr_show(unsigned int val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t fault_ulong_attr_show(unsigned long val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lu\n", val);
+}
+
+static ssize_t fault_bool_attr_show(bool val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t fault_atomic_t_attr_show(atomic_t val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", atomic_read(&val));
+}
+
+static ssize_t fault_uint_attr_store(unsigned int *val, const char *page, size_t count)
+{
+ unsigned int tmp;
+ int result;
+
+ result = kstrtouint(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t fault_ulong_attr_store(unsigned long *val, const char *page, size_t count)
+{
+ int result;
+ unsigned long tmp;
+
+ result = kstrtoul(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t fault_bool_attr_store(bool *val, const char *page, size_t count)
+{
+ bool tmp;
+ int result;
+
+ result = kstrtobool(page, &tmp);
+ if (result < 0)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t fault_atomic_t_attr_store(atomic_t *val, const char *page, size_t count)
+{
+ int tmp;
+ int result;
+
+ result = kstrtoint(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ atomic_set(val, tmp);
+ return count;
+}
+
+#define CONFIGFS_ATTR_NAMED(_pfx, _name, _attr_name) \
+static struct configfs_attribute _pfx##attr_##_name = { \
+ .ca_name = _attr_name, \
+ .ca_mode = 0644, \
+ .ca_owner = THIS_MODULE, \
+ .show = _pfx##_name##_show, \
+ .store = _pfx##_name##_store, \
+}
+
+static struct fault_config *to_fault_config(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct fault_config, group);
+}
+
+#define FAULT_CONFIGFS_ATTR_NAMED(NAME, ATTR_NAME, MEMBER, TYPE) \
+static ssize_t fault_##NAME##_show(struct config_item *item, char *page) \
+{ \
+ return fault_##TYPE##_attr_show(to_fault_config(item)->attr.MEMBER, page); \
+} \
+static ssize_t fault_##NAME##_store(struct config_item *item, const char *page, size_t count) \
+{ \
+ struct fault_config *config = to_fault_config(item); \
+ return fault_##TYPE##_attr_store(&config->attr.MEMBER, page, count); \
+} \
+CONFIGFS_ATTR_NAMED(fault_, NAME, ATTR_NAME)
+
+#define FAULT_CONFIGFS_ATTR(NAME, TYPE) \
+ FAULT_CONFIGFS_ATTR_NAMED(NAME, __stringify(NAME), NAME, TYPE)
+
+FAULT_CONFIGFS_ATTR(probability, ulong);
+FAULT_CONFIGFS_ATTR(interval, ulong);
+FAULT_CONFIGFS_ATTR(times, atomic_t);
+FAULT_CONFIGFS_ATTR(space, atomic_t);
+FAULT_CONFIGFS_ATTR(verbose, ulong);
+FAULT_CONFIGFS_ATTR_NAMED(ratelimit_interval, "verbose_ratelimit_interval_ms",
+ ratelimit_state.interval, uint);
+FAULT_CONFIGFS_ATTR_NAMED(ratelimit_burst, "verbose_ratelimit_burst",
+ ratelimit_state.burst, uint);
+FAULT_CONFIGFS_ATTR_NAMED(task_filter, "task-filter", task_filter, bool);
+
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+
+static ssize_t fault_stacktrace_depth_show(struct config_item *item, char *page)
+{
+ return fault_ulong_attr_show(to_fault_config(item)->attr.stacktrace_depth, page);
+}
+
+static ssize_t fault_stacktrace_depth_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ int result;
+ unsigned long tmp;
+
+ result = kstrtoul(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ to_fault_config(item)->attr.stacktrace_depth =
+ min_t(unsigned long, tmp, MAX_STACK_TRACE_DEPTH);
+
+ return count;
+}
+
+CONFIGFS_ATTR_NAMED(fault_, stacktrace_depth, "stacktrace-depth");
+
+static ssize_t fault_xul_attr_show(unsigned long val, char *page)
+{
+ return snprintf(page, PAGE_SIZE,
+ sizeof(val) == sizeof(u32) ? "0x%08lx\n" : "0x%016lx\n", val);
+}
+
+static ssize_t fault_xul_attr_store(unsigned long *val, const char *page, size_t count)
+{
+ return fault_ulong_attr_store(val, page, count);
+}
+
+FAULT_CONFIGFS_ATTR_NAMED(require_start, "require-start", require_start, xul);
+FAULT_CONFIGFS_ATTR_NAMED(require_end, "require-end", require_end, xul);
+FAULT_CONFIGFS_ATTR_NAMED(reject_start, "reject-start", reject_start, xul);
+FAULT_CONFIGFS_ATTR_NAMED(reject_end, "reject-end", reject_end, xul);
+
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+
+static struct configfs_attribute *fault_config_attrs[] = {
+ &fault_attr_probability,
+ &fault_attr_interval,
+ &fault_attr_times,
+ &fault_attr_space,
+ &fault_attr_verbose,
+ &fault_attr_ratelimit_interval,
+ &fault_attr_ratelimit_burst,
+ &fault_attr_task_filter,
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+ &fault_attr_stacktrace_depth,
+ &fault_attr_require_start,
+ &fault_attr_require_end,
+ &fault_attr_reject_start,
+ &fault_attr_reject_end,
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+ NULL,
+};
+
+static const struct config_item_type fault_config_type = {
+ .ct_attrs = fault_config_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+void fault_config_init(struct fault_config *config, const char *name)
+{
+ config_group_init_type_name(&config->group, name, &fault_config_type);
+}
+EXPORT_SYMBOL_GPL(fault_config_init);
+
+#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
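Note: a sketch of how a driver might consume the new interface. The driver types and the "fail_io" name are hypothetical; struct fault_config and fault_config_init() come from this series, should_fail() and configfs_add_default_group() are existing APIs:

#include <linux/fault-inject.h>
#include <linux/configfs.h>

struct my_dev_config {				/* hypothetical driver type */
	struct config_group group;		/* driver's own configfs group */
	struct fault_config fail_io;		/* fault_attr + config_group */
};

static void my_dev_config_init(struct my_dev_config *cfg)
{
	/* Exposes probability/interval/times/... under <group>/fail_io/ */
	fault_config_init(&cfg->fail_io, "fail_io");
	configfs_add_default_group(&cfg->fail_io.group, &cfg->group);
}

/* Hot path: should_fail() consults the values written via configfs. */
static bool my_dev_should_fail(struct my_dev_config *cfg, size_t size)
{
	return should_fail(&cfg->fail_io.attr, size);
}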
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 274014e4eafe..967fba189c5f 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -126,13 +126,13 @@ __out: \
iterate_buf(i, n, base, len, off, \
i->ubuf, (I)) \
} else if (likely(iter_is_iovec(i))) { \
- const struct iovec *iov = i->iov; \
+ const struct iovec *iov = iter_iov(i); \
void __user *base; \
size_t len; \
iterate_iovec(i, n, base, len, off, \
iov, (I)) \
- i->nr_segs -= iov - i->iov; \
- i->iov = iov; \
+ i->nr_segs -= iov - iter_iov(i); \
+ i->__iov = iov; \
} else if (iov_iter_is_bvec(i)) { \
const struct bio_vec *bvec = i->bvec; \
void *base; \
@@ -355,7 +355,7 @@ size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
size_t skip;
size -= count;
- for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
+ for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
size_t len = min(count, p->iov_len - skip);
size_t ret;
@@ -398,7 +398,7 @@ size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
size_t skip;
size -= count;
- for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
+ for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
size_t len = min(count, p->iov_len - skip);
size_t ret;
@@ -425,7 +425,7 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
.nofault = false,
.user_backed = true,
.data_source = direction,
- .iov = iov,
+ .__iov = iov,
.nr_segs = nr_segs,
.iov_offset = 0,
.count = count
@@ -876,14 +876,14 @@ static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
i->count -= size;
size += i->iov_offset; // from beginning of current segment
- for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
+ for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
if (likely(size < iov->iov_len))
break;
size -= iov->iov_len;
}
i->iov_offset = size;
- i->nr_segs -= iov - i->iov;
- i->iov = iov;
+ i->nr_segs -= iov - iter_iov(i);
+ i->__iov = iov;
}
void iov_iter_advance(struct iov_iter *i, size_t size)
@@ -958,12 +958,12 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
unroll -= n;
}
} else { /* same logics for iovec and kvec */
- const struct iovec *iov = i->iov;
+ const struct iovec *iov = iter_iov(i);
while (1) {
size_t n = (--iov)->iov_len;
i->nr_segs++;
if (unroll <= n) {
- i->iov = iov;
+ i->__iov = iov;
i->iov_offset = n - unroll;
return;
}
@@ -980,7 +980,7 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
if (i->nr_segs > 1) {
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
- return min(i->count, i->iov->iov_len - i->iov_offset);
+ return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
if (iov_iter_is_bvec(i))
return min(i->count, i->bvec->bv_len - i->iov_offset);
}
@@ -1095,13 +1095,14 @@ static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
unsigned k;
for (k = 0; k < i->nr_segs; k++, skip = 0) {
- size_t len = i->iov[k].iov_len - skip;
+ const struct iovec *iov = iter_iov(i) + k;
+ size_t len = iov->iov_len - skip;
if (len > size)
len = size;
if (len & len_mask)
return false;
- if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
+ if ((unsigned long)(iov->iov_base + skip) & addr_mask)
return false;
size -= len;
@@ -1194,9 +1195,10 @@ static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
unsigned k;
for (k = 0; k < i->nr_segs; k++, skip = 0) {
- size_t len = i->iov[k].iov_len - skip;
+ const struct iovec *iov = iter_iov(i) + k;
+ size_t len = iov->iov_len - skip;
if (len) {
- res |= (unsigned long)i->iov[k].iov_base + skip;
+ res |= (unsigned long)iov->iov_base + skip;
if (len > size)
len = size;
res |= len;
@@ -1273,14 +1275,15 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
return ~0U;
for (k = 0; k < i->nr_segs; k++) {
- if (i->iov[k].iov_len) {
- unsigned long base = (unsigned long)i->iov[k].iov_base;
+ const struct iovec *iov = iter_iov(i) + k;
+ if (iov->iov_len) {
+ unsigned long base = (unsigned long)iov->iov_base;
if (v) // if not the first one
res |= base | v; // this start | previous end
- v = base + i->iov[k].iov_len;
- if (size <= i->iov[k].iov_len)
+ v = base + iov->iov_len;
+ if (size <= iov->iov_len)
break;
- size -= i->iov[k].iov_len;
+ size -= iov->iov_len;
}
}
return res;
@@ -1396,13 +1399,14 @@ static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
return (unsigned long)i->ubuf + i->iov_offset;
for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
- size_t len = i->iov[k].iov_len - skip;
+ const struct iovec *iov = iter_iov(i) + k;
+ size_t len = iov->iov_len - skip;
if (unlikely(!len))
continue;
if (*size > len)
*size = len;
- return (unsigned long)i->iov[k].iov_base + skip;
+ return (unsigned long)iov->iov_base + skip;
}
BUG(); // if it had been empty, we wouldn't get called
}
@@ -1614,7 +1618,7 @@ static int iov_npages(const struct iov_iter *i, int maxpages)
const struct iovec *p;
int npages = 0;
- for (p = i->iov; size; skip = 0, p++) {
+ for (p = iter_iov(i); size; skip = 0, p++) {
unsigned offs = offset_in_page(p->iov_base + skip);
size_t len = min(p->iov_len - skip, size);
@@ -1691,14 +1695,14 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
flags);
else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
/* iovec and kvec have identical layout */
- return new->iov = kmemdup(new->iov,
+ return new->__iov = kmemdup(new->__iov,
new->nr_segs * sizeof(struct iovec),
flags);
return NULL;
}
EXPORT_SYMBOL(dup_iter);
-static int copy_compat_iovec_from_user(struct iovec *iov,
+static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
const struct iovec __user *uvec, unsigned long nr_segs)
{
const struct compat_iovec __user *uiov =
@@ -1731,18 +1735,35 @@ uaccess_end:
}
static int copy_iovec_from_user(struct iovec *iov,
- const struct iovec __user *uvec, unsigned long nr_segs)
+ const struct iovec __user *uiov, unsigned long nr_segs)
{
- unsigned long seg;
+ int ret = -EFAULT;
- if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
+ if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
return -EFAULT;
- for (seg = 0; seg < nr_segs; seg++) {
- if ((ssize_t)iov[seg].iov_len < 0)
- return -EINVAL;
- }
- return 0;
+ do {
+ void __user *buf;
+ ssize_t len;
+
+ unsafe_get_user(len, &uiov->iov_len, uaccess_end);
+ unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
+
+ /* check for size_t not fitting in ssize_t .. */
+ if (unlikely(len < 0)) {
+ ret = -EINVAL;
+ goto uaccess_end;
+ }
+ iov->iov_base = buf;
+ iov->iov_len = len;
+
+ uiov++; iov++;
+ } while (--nr_segs);
+
+ ret = 0;
+uaccess_end:
+ user_access_end();
+ return ret;
}
struct iovec *iovec_from_user(const struct iovec __user *uvec,
@@ -1767,7 +1788,7 @@ struct iovec *iovec_from_user(const struct iovec __user *uvec,
return ERR_PTR(-ENOMEM);
}
- if (compat)
+ if (unlikely(compat))
ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
else
ret = copy_iovec_from_user(iov, uvec, nr_segs);
@@ -1780,6 +1801,30 @@ struct iovec *iovec_from_user(const struct iovec __user *uvec,
return iov;
}
+/*
+ * Single segment iovec supplied by the user, import it as ITER_UBUF.
+ */
+static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
+ struct iovec **iovp, struct iov_iter *i,
+ bool compat)
+{
+ struct iovec *iov = *iovp;
+ ssize_t ret;
+
+ if (compat)
+ ret = copy_compat_iovec_from_user(iov, uvec, 1);
+ else
+ ret = copy_iovec_from_user(iov, uvec, 1);
+ if (unlikely(ret))
+ return ret;
+
+ ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
+ if (unlikely(ret))
+ return ret;
+ *iovp = NULL;
+ return i->count;
+}
+
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
struct iov_iter *i, bool compat)
@@ -1788,6 +1833,9 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec,
unsigned long seg;
struct iovec *iov;
+ if (nr_segs == 1)
+ return __import_iovec_ubuf(type, uvec, iovp, i, compat);
+
iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
if (IS_ERR(iov)) {
*iovp = NULL;
@@ -1866,9 +1914,7 @@ int import_single_range(int rw, void __user *buf, size_t len,
if (unlikely(!access_ok(buf, len)))
return -EFAULT;
- iov->iov_base = buf;
- iov->iov_len = len;
- iov_iter_init(i, rw, iov, 1, len);
+ iov_iter_ubuf(i, rw, buf, len);
return 0;
}
EXPORT_SYMBOL(import_single_range);
@@ -1918,7 +1964,7 @@ void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
if (iov_iter_is_bvec(i))
i->bvec -= state->nr_segs - i->nr_segs;
else
- i->iov -= state->nr_segs - i->nr_segs;
+ i->__iov -= state->nr_segs - i->nr_segs;
i->nr_segs = state->nr_segs;
}
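Note: with ->iov renamed to the private ->__iov, outside readers are expected to go through the iter_iov() accessor introduced alongside this change rather than touching the field directly. A minimal sketch (the helper is hypothetical):

#include <linux/uio.h>

/* Bytes left in the current segment of an ITER_IOVEC iterator, read
 * through the accessor instead of i->iov. */
static size_t first_seg_remaining(const struct iov_iter *i)
{
	const struct iovec *iov = iter_iov(i);

	return iov->iov_len - i->iov_offset;
}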
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index de0ee2e03ed6..b08bb1fba106 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -55,14 +55,24 @@ static int debugfs_print_results(struct seq_file *seq, void *v)
enum kunit_status success = kunit_suite_has_succeeded(suite);
struct kunit_case *test_case;
- if (!suite || !suite->log)
+ if (!suite)
return 0;
- seq_printf(seq, "%s", suite->log);
+ /* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
+ seq_puts(seq, "KTAP version 1\n");
+ seq_puts(seq, "1..1\n");
+
+ /* Print suite header because it is not stored in the test logs. */
+ seq_puts(seq, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
+ seq_printf(seq, KUNIT_SUBTEST_INDENT "# Subtest: %s\n", suite->name);
+ seq_printf(seq, KUNIT_SUBTEST_INDENT "1..%zd\n", kunit_suite_num_test_cases(suite));
kunit_suite_for_each_test_case(suite, test_case)
debugfs_print_result(seq, suite, test_case);
+ if (suite->log)
+ seq_printf(seq, "%s", suite->log);
+
seq_printf(seq, "%s %d %s\n",
kunit_status_to_ok_not_ok(success), 1, suite->name);
return 0;
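Note: for a hypothetical one-case suite named "example", the debugfs log produced by this function should now parse as nested KTAP, roughly:

KTAP version 1
1..1
    KTAP version 1
    # Subtest: example
    1..1
    ok 1 example_case
ok 1 example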
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 4df0335d0d06..42e44caa1bdd 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -6,6 +6,7 @@
* Author: Brendan Higgins <brendanhiggins@google.com>
*/
#include <kunit/test.h>
+#include <kunit/test-bug.h>
#include "try-catch-impl.h"
@@ -443,18 +444,6 @@ static struct kunit_suite kunit_resource_test_suite = {
.test_cases = kunit_resource_test_cases,
};
-static void kunit_log_test(struct kunit *test);
-
-static struct kunit_case kunit_log_test_cases[] = {
- KUNIT_CASE(kunit_log_test),
- {}
-};
-
-static struct kunit_suite kunit_log_test_suite = {
- .name = "kunit-log-test",
- .test_cases = kunit_log_test_cases,
-};
-
static void kunit_log_test(struct kunit *test)
{
struct kunit_suite suite;
@@ -481,6 +470,29 @@ static void kunit_log_test(struct kunit *test)
#endif
}
+static void kunit_log_newline_test(struct kunit *test)
+{
+ kunit_info(test, "Add newline\n");
+ if (test->log) {
+ KUNIT_ASSERT_NOT_NULL_MSG(test, strstr(test->log, "Add newline\n"),
+ "Missing log line, full log:\n%s", test->log);
+ KUNIT_EXPECT_NULL(test, strstr(test->log, "Add newline\n\n"));
+ } else {
+ kunit_skip(test, "only useful when debugfs is enabled");
+ }
+}
+
+static struct kunit_case kunit_log_test_cases[] = {
+ KUNIT_CASE(kunit_log_test),
+ KUNIT_CASE(kunit_log_newline_test),
+ {}
+};
+
+static struct kunit_suite kunit_log_test_suite = {
+ .name = "kunit-log-test",
+ .test_cases = kunit_log_test_cases,
+};
+
static void kunit_status_set_failure_test(struct kunit *test)
{
struct kunit fake;
@@ -521,7 +533,46 @@ static struct kunit_suite kunit_status_test_suite = {
.test_cases = kunit_status_test_cases,
};
+static void kunit_current_test(struct kunit *test)
+{
+ /* Check results of both current->kunit_test and
+ * kunit_get_current_test() are equivalent to current test.
+ */
+ KUNIT_EXPECT_PTR_EQ(test, test, current->kunit_test);
+ KUNIT_EXPECT_PTR_EQ(test, test, kunit_get_current_test());
+}
+
+static void kunit_current_fail_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+ KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+
+ /* Set current->kunit_test to fake test. */
+ current->kunit_test = &fake;
+
+ kunit_fail_current_test("This should make `fake` test fail.");
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+ kunit_cleanup(&fake);
+
+ /* Reset current->kunit_test to current test. */
+ current->kunit_test = test;
+}
+
+static struct kunit_case kunit_current_test_cases[] = {
+ KUNIT_CASE(kunit_current_test),
+ KUNIT_CASE(kunit_current_fail_test),
+ {}
+};
+
+static struct kunit_suite kunit_current_test_suite = {
+ .name = "kunit_current",
+ .test_cases = kunit_current_test_cases,
+};
+
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
- &kunit_log_test_suite, &kunit_status_test_suite);
+ &kunit_log_test_suite, &kunit_status_test_suite,
+ &kunit_current_test_suite);
MODULE_LICENSE("GPL v2");
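Note: the accessors exercised above are what non-test code uses to hook into a running test. A minimal sketch (the helper name is hypothetical; kunit_fail_current_test() is the real macro from <kunit/test-bug.h>):

#include <kunit/test-bug.h>

/* Compiles away to a no-op when KUnit is disabled. */
static void my_check_invariant(bool ok)
{
	if (!ok)
		kunit_fail_current_test("invariant violated");
}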
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index c9e15bb60058..e2910b261112 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -108,28 +108,51 @@ static void kunit_print_test_stats(struct kunit *test,
stats.total);
}
+/**
+ * kunit_log_newline() - Add newline to the end of log if one is not
+ * already present.
+ * @log: The log to add the newline to.
+ */
+static void kunit_log_newline(char *log)
+{
+ int log_len, len_left;
+
+ log_len = strlen(log);
+ len_left = KUNIT_LOG_SIZE - log_len - 1;
+
+ if (log_len > 0 && log[log_len - 1] != '\n')
+ strncat(log, "\n", len_left);
+}
+
/*
* Append formatted message to log, size of which is limited to
* KUNIT_LOG_SIZE bytes (including null terminating byte).
*/
void kunit_log_append(char *log, const char *fmt, ...)
{
- char line[KUNIT_LOG_SIZE];
va_list args;
- int len_left;
+ int len, log_len, len_left;
if (!log)
return;
- len_left = KUNIT_LOG_SIZE - strlen(log) - 1;
+ log_len = strlen(log);
+ len_left = KUNIT_LOG_SIZE - log_len - 1;
if (len_left <= 0)
return;
+ /* Evaluate length of line to add to log */
+ va_start(args, fmt);
+ len = vsnprintf(NULL, 0, fmt, args) + 1;
+ va_end(args);
+
+ /* Print formatted line to the log */
va_start(args, fmt);
- vsnprintf(line, sizeof(line), fmt, args);
+ vsnprintf(log + log_len, min(len, len_left), fmt, args);
va_end(args);
- strncat(log, line, len_left);
+ /* Add newline to end of log if not already present. */
+ kunit_log_newline(log);
}
EXPORT_SYMBOL_GPL(kunit_log_append);
@@ -147,10 +170,18 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
static void kunit_print_suite_start(struct kunit_suite *suite)
{
- kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
- kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
+ /*
+ * We do not log the test suite header as doing so would
+ * mean debugfs display would consist of the test suite
+ * header prior to individual test results.
+ * Hence directly printk the suite status, and we will
+ * separately seq_printf() the suite header for the debugfs
+ * representation.
+ */
+ pr_info(KUNIT_SUBTEST_INDENT "KTAP version 1\n");
+ pr_info(KUNIT_SUBTEST_INDENT "# Subtest: %s\n",
suite->name);
- kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
+ pr_info(KUNIT_SUBTEST_INDENT "1..%zd\n",
kunit_suite_num_test_cases(suite));
}
@@ -167,10 +198,9 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
/*
* We do not log the test suite results as doing so would
- * mean debugfs display would consist of the test suite
- * description and status prior to individual test results.
- * Hence directly printk the suite status, and we will
- * separately seq_printf() the suite status for the debugfs
+ * mean debugfs display would consist of an incorrect test
+ * number. Hence directly printk the suite result, and we will
+ * separately seq_printf() the suite results for the debugfs
* representation.
*/
if (suite)
@@ -437,7 +467,6 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
struct kunit_try_catch_context context;
struct kunit_try_catch *try_catch;
- kunit_init_test(test, test_case->name, test_case->log);
try_catch = &test->try_catch;
kunit_try_catch_init(try_catch,
@@ -533,6 +562,8 @@ int kunit_run_tests(struct kunit_suite *suite)
struct kunit_result_stats param_stats = { 0 };
test_case->status = KUNIT_SKIPPED;
+ kunit_init_test(&test, test_case->name, test_case->log);
+
if (!test_case->generate_params) {
/* Non-parameterised test. */
kunit_run_case_catch_errors(suite, test_case, &test);
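Note: the reworked kunit_log_append() sizes the line first, then formats directly into the buffer tail, dropping the on-stack line[KUNIT_LOG_SIZE] copy. The same two-pass vsnprintf() pattern as a standalone plain-C sketch:

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define LOG_SIZE 2048			/* stand-in for KUNIT_LOG_SIZE */

static void log_append(char *log, const char *fmt, ...)
{
	va_list args;
	int used = strlen(log);
	int left = LOG_SIZE - used - 1;
	int need;

	if (left <= 0)
		return;

	/* Pass 1: measure the formatted line (vsnprintf with size 0). */
	va_start(args, fmt);
	need = vsnprintf(NULL, 0, fmt, args) + 1;
	va_end(args);

	/* Pass 2: format straight into the tail, truncating if needed. */
	va_start(args, fmt);
	vsnprintf(log + used, need < left ? need : left, fmt, args);
	va_end(args);
}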
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 5ca0d815a95d..649e687413a0 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -65,12 +65,6 @@ static void __exit libcrc32c_mod_fini(void)
crypto_free_shash(tfm);
}
-const char *crc32c_impl(void)
-{
- return crypto_shash_driver_name(tfm);
-}
-EXPORT_SYMBOL(crc32c_impl);
-
module_init(libcrc32c_mod_init);
module_exit(libcrc32c_mod_fini);
diff --git a/lib/list-test.c b/lib/list-test.c
index d374cf5d1a57..0cc27de9cec8 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -8,6 +8,7 @@
#include <kunit/test.h>
#include <linux/list.h>
+#include <linux/klist.h>
struct list_test_struct {
int data;
@@ -1199,6 +1200,303 @@ static struct kunit_suite hlist_test_module = {
.test_cases = hlist_test_cases,
};
-kunit_test_suites(&list_test_module, &hlist_test_module);
+
+struct klist_test_struct {
+ int data;
+ struct klist klist;
+ struct klist_node klist_node;
+};
+
+static int node_count;
+static struct klist_node *last_node;
+
+static void check_node(struct klist_node *node_ptr)
+{
+ node_count++;
+ last_node = node_ptr;
+}
+
+static void check_delete_node(struct klist_node *node_ptr)
+{
+ node_count--;
+ last_node = node_ptr;
+}
+
+static void klist_test_add_tail(struct kunit *test)
+{
+ struct klist_node a, b;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_tail(&a, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &a);
+
+ klist_add_tail(&b, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 2);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &b);
+
+ /* should be [list] -> a -> b */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+static void klist_test_add_head(struct kunit *test)
+{
+ struct klist_node a, b;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &a);
+
+ klist_add_head(&b, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 2);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &b);
+
+ /* should be [list] -> b -> a */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+static void klist_test_add_behind(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ klist_add_head(&b, &mylist);
+
+ klist_add_behind(&c, &a);
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ klist_add_behind(&d, &b);
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_init(&mylist, &i);
+
+ /* should be [list] -> b -> d -> a -> c*/
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+static void klist_test_add_before(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ klist_add_head(&b, &mylist);
+ klist_add_before(&c, &a);
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ klist_add_before(&d, &b);
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_init(&mylist, &i);
+
+ /* should be [list] -> b -> d -> a -> c*/
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+/*
+ * Verify that klist_del() delays the deletion of a node until there
+ * are no other references to it
+ */
+static void klist_test_del_refcount_greater_than_zero(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+ /* Add nodes a,b,c,d to the list*/
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ /* Advance the iterator to point to node c*/
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+
+ /* Try to delete node c while there is a reference to it*/
+ klist_del(&c);
+
+ /*
+ * Verify that node c is still attached to the list even after being
+ * deleted. Since the iterator still points to c, the reference count is not
+ * decreased to 0
+ */
+ KUNIT_EXPECT_TRUE(test, klist_node_attached(&c));
+
+ /* Check that node c has not been removed yet*/
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_exit(&i);
+
+ /*
+ * Since the iterator is no longer pointing to node c, node c is removed
+ * from the list
+ */
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+}
+
+/*
+ * Verify that klist_del() deletes a node immediately when there are no
+ * other references to it.
+ */
+static void klist_test_del_refcount_zero(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+ /* Add nodes a,b,c,d to the list*/
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+ /* Delete node c*/
+ klist_del(&c);
+
+ /* Check that node c is deleted from the list*/
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ /* Should be [list] -> a -> b -> d*/
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+static void klist_test_remove(struct kunit *test)
+{
+ /* This test doesn't check correctness under concurrent access */
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+ /* Add nodes a,b,c,d to the list*/
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+ /* Delete node c*/
+ klist_remove(&c);
+
+ /* Check the nodes in the list*/
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ /* should be [list] -> a -> b -> d*/
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+static void klist_test_node_attached(struct kunit *test)
+{
+ struct klist_node a = {};
+ struct klist mylist;
+
+ klist_init(&mylist, NULL, NULL);
+
+ KUNIT_EXPECT_FALSE(test, klist_node_attached(&a));
+ klist_add_head(&a, &mylist);
+ KUNIT_EXPECT_TRUE(test, klist_node_attached(&a));
+ klist_del(&a);
+ KUNIT_EXPECT_FALSE(test, klist_node_attached(&a));
+
+}
+
+static struct kunit_case klist_test_cases[] = {
+ KUNIT_CASE(klist_test_add_tail),
+ KUNIT_CASE(klist_test_add_head),
+ KUNIT_CASE(klist_test_add_behind),
+ KUNIT_CASE(klist_test_add_before),
+ KUNIT_CASE(klist_test_del_refcount_greater_than_zero),
+ KUNIT_CASE(klist_test_del_refcount_zero),
+ KUNIT_CASE(klist_test_remove),
+ KUNIT_CASE(klist_test_node_attached),
+ {},
+};
+
+static struct kunit_suite klist_test_module = {
+ .name = "klist",
+ .test_cases = klist_test_cases,
+};
+
+kunit_test_suites(&list_test_module, &hlist_test_module, &klist_test_module);
MODULE_LICENSE("GPL v2");
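Note: klist_init() is the entry point these tests drive; its optional callbacks fire when a node is added and when its last reference is dropped, which is how the deferred-removal cases above observe klist_del(). A minimal sketch with illustrative names:

#include <linux/klist.h>
#include <linux/printk.h>

static void node_get(struct klist_node *n)
{
	pr_debug("klist: ref taken on %p\n", n);
}

static void node_put(struct klist_node *n)
{
	pr_debug("klist: last ref dropped on %p\n", n);
}

static struct klist example_list;

static void example_setup(void)
{
	klist_init(&example_list, node_get, node_put);	/* callbacks optional */
}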
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index de4ee0d50906..cd2bdba6d3ed 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -334,7 +334,7 @@ kvfree_rcu_1_arg_vmalloc_test(void)
return -1;
p->array[0] = 'a';
- kvfree_rcu(p);
+ kvfree_rcu_mightsleep(p);
}
return 0;
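Note: the rename makes the behavioural split explicit: the single-argument form may block, while the two-argument form (which needs an rcu_head in the object) never does. A sketch with a hypothetical struct:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {				/* hypothetical */
	struct rcu_head rcu;
	int data;
};

static void release_foo(struct foo *p, bool can_sleep)
{
	if (can_sleep)
		kvfree_rcu_mightsleep(p);	/* may block; no rcu_head used */
	else
		kvfree_rcu(p, rcu);		/* never sleeps; uses p->rcu */
}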
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
index e814061d6aa0..9f031eafc465 100644
--- a/lib/vdso/Makefile
+++ b/lib/vdso/Makefile
@@ -5,18 +5,13 @@ GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
-# This cmd checks that the vdso library does not contain absolute relocation
+# This cmd checks that the vdso library does not contain dynamic relocations.
# It has to be called after the linking of the vdso library and requires it
# as a parameter.
#
-# $(ARCH_REL_TYPE_ABS) is defined in the arch specific makefile and corresponds
-# to the absolute relocation types printed by "objdump -R" and accepted by the
-# dynamic linker.
-ifndef ARCH_REL_TYPE_ABS
-$(error ARCH_REL_TYPE_ABS is not set)
-endif
-
+# As a workaround for some GNU ld ports which produce unneeded R_*_NONE
+# dynamic relocations, ignore R_*_NONE.
quiet_cmd_vdso_check = VDSOCHK $@
- cmd_vdso_check = if $(OBJDUMP) -R $@ | grep -E -h "$(ARCH_REL_TYPE_ABS)"; \
+ cmd_vdso_check = if $(READELF) -rW $@ | grep -v _NONE | grep -q " R_\w*_"; \
then (echo >&2 "$@: dynamic relocations are not supported"; \
rm -f $@; /bin/false); fi
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index be71a03c936a..426418253fd4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -3621,7 +3621,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
if (!digit
|| (base == 16 && !isxdigit(digit))
|| (base == 10 && !isdigit(digit))
- || (base == 8 && (!isdigit(digit) || digit > '7'))
+ || (base == 8 && !isodigit(digit))
|| (base == 0 && !isdigit(digit)))
break;
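Note: isodigit() comes from <linux/ctype.h> and folds the two-part octal range check into one predicate; it is defined essentially as:

/* True for the octal digits '0'..'7'. */
static inline int isodigit(const char c)
{
	return c >= '0' && c <= '7';
}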