Diffstat (limited to 'lib')
-rw-r--r--  lib/alloc_tag.c        |  31
-rw-r--r--  lib/codetag.c          |  17
-rw-r--r--  lib/find_bit.c         |  24
-rw-r--r--  lib/kobject_uevent.c   |  20
-rw-r--r--  lib/maple_tree.c       |  40
-rw-r--r--  lib/ref_tracker.c      | 289
-rw-r--r--  lib/test_hmm.c         |  14
-rw-r--r--  lib/test_maple_tree.c  |  32
-rw-r--r--  lib/test_objagg.c      |  77
-rw-r--r--  lib/test_vmalloc.c     |  42
-rw-r--r--  lib/tests/test_bits.c  |  19
-rw-r--r--  lib/xarray.c           |   3
12 files changed, 503 insertions(+), 105 deletions(-)
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 0142bc916f73..e9b33848700a 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -25,8 +25,10 @@ static bool mem_profiling_support;
 
 static struct codetag_type *alloc_tag_cttype;
 
+#ifdef CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU
 DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
 EXPORT_SYMBOL(_shared_alloc_tag);
+#endif
 
 DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
 			mem_alloc_profiling_key);
@@ -46,21 +48,16 @@ struct allocinfo_private {
 static void *allocinfo_start(struct seq_file *m, loff_t *pos)
 {
 	struct allocinfo_private *priv;
-	struct codetag *ct;
 	loff_t node = *pos;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	m->private = priv;
-	if (!priv)
-		return NULL;
-
-	priv->print_header = (node == 0);
+	priv = (struct allocinfo_private *)m->private;
 	codetag_lock_module_list(alloc_tag_cttype, true);
-	priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
-	while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
-		node--;
-
-	return ct ? priv : NULL;
+	if (node == 0) {
+		priv->print_header = true;
+		priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
+		codetag_next_ct(&priv->iter);
+	}
+	return priv->iter.ct ? priv : NULL;
 }
 
 static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
@@ -77,12 +74,7 @@ static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
 
 static void allocinfo_stop(struct seq_file *m, void *arg)
 {
-	struct allocinfo_private *priv = (struct allocinfo_private *)m->private;
-
-	if (priv) {
-		codetag_lock_module_list(alloc_tag_cttype, false);
-		kfree(priv);
-	}
+	codetag_lock_module_list(alloc_tag_cttype, false);
 }
 
 static void print_allocinfo_header(struct seq_buf *buf)
@@ -820,7 +812,8 @@ static int __init alloc_tag_init(void)
 		return 0;
 	}
 
-	if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
+	if (!proc_create_seq_private(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op,
+				     sizeof(struct allocinfo_private), NULL)) {
 		pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
 		shutdown_mem_profiling(false);
 		return -ENOMEM;
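The alloc_tag.c change above moves per-open state out of ->start() and into the seq_file core. A minimal sketch of that pattern, assuming invented names (demo_iter, demo_seq_ops, "demoinfo") rather than anything from the patch:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct demo_iter {		/* zeroed by the seq_file core on each open */
	bool print_header;
};

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	struct demo_iter *it = m->private;	/* preallocated, no kzalloc() here */

	if (*pos == 0)
		it->print_header = true;
	return *pos < 4 ? it : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	return ++*pos < 4 ? v : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* nothing to free: the seq_file core owns m->private */
}

static int demo_show(struct seq_file *m, void *v)
{
	struct demo_iter *it = v;

	if (it->print_header) {
		seq_puts(m, "# idx\n");
		it->print_header = false;
	}
	seq_printf(m, "%lld\n", m->index);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

static int __init demo_init(void)
{
	if (!proc_create_seq_private("demoinfo", 0400, NULL, &demo_seq_ops,
				     sizeof(struct demo_iter), NULL))
		return -ENOMEM;
	return 0;
}
module_init(demo_init);

With proc_create_seq_private(), the core allocates and zeroes the state on open and frees it on release, so ->start() can no longer fail with -ENOMEM and ->stop() stays symmetric with ->start() even when a read is restarted.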
diff --git a/lib/codetag.c b/lib/codetag.c
index 650d54d7e14d..545911cebd25 100644
--- a/lib/codetag.c
+++ b/lib/codetag.c
@@ -11,8 +11,14 @@ struct codetag_type {
 	struct list_head link;
 	unsigned int count;
 	struct idr mod_idr;
-	struct rw_semaphore mod_lock; /* protects mod_idr */
+	/*
+	 * protects mod_idr, next_mod_seq,
+	 * iter->mod_seq and cmod->mod_seq
+	 */
+	struct rw_semaphore mod_lock;
 	struct codetag_type_desc desc;
+	/* generates unique sequence number for module load */
+	unsigned long next_mod_seq;
 };
 
 struct codetag_range {
@@ -23,6 +29,7 @@ struct codetag_range {
 struct codetag_module {
 	struct module *mod;
 	struct codetag_range range;
+	unsigned long mod_seq;
 };
 
 static DEFINE_MUTEX(codetag_lock);
@@ -48,6 +55,7 @@ struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype)
 		.cmod = NULL,
 		.mod_id = 0,
 		.ct = NULL,
+		.mod_seq = 0,
 	};
 
 	return iter;
@@ -91,11 +99,13 @@ struct codetag *codetag_next_ct(struct codetag_iterator *iter)
 		if (!cmod)
 			break;
 
-		if (cmod != iter->cmod) {
+		if (!iter->cmod || iter->mod_seq != cmod->mod_seq) {
 			iter->cmod = cmod;
+			iter->mod_seq = cmod->mod_seq;
 			ct = get_first_module_ct(cmod);
-		} else
+		} else {
 			ct = get_next_module_ct(iter);
+		}
 
 		if (ct)
 			break;
@@ -191,6 +201,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
 	cmod->range = range;
 
 	down_write(&cttype->mod_lock);
+	cmod->mod_seq = ++cttype->next_mod_seq;
	mod_id = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL);
 	if (mod_id >= 0) {
 		if (cttype->desc.module_load) {
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 06b6342aa3ae..d4b5a29e3e72 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -18,6 +18,7 @@
 #include <linux/math.h>
 #include <linux/minmax.h>
 #include <linux/swab.h>
+#include <linux/random.h>
 
 /*
  * Common helper for find_bit() function family
@@ -291,3 +292,26 @@ EXPORT_SYMBOL(_find_next_bit_le);
 #endif
 
 #endif /* __BIG_ENDIAN */
+
+/**
+ * find_random_bit - find a set bit at random position
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns: a position of a random set bit; >= @size otherwise
+ */
+unsigned long find_random_bit(const unsigned long *addr, unsigned long size)
+{
+	int w = bitmap_weight(addr, size);
+
+	switch (w) {
+	case 0:
+		return size;
+	case 1:
+		/* Performance trick for single-bit bitmaps */
+		return find_first_bit(addr, size);
+	default:
+		return find_nth_bit(addr, size, get_random_u32_below(w));
+	}
+}
+EXPORT_SYMBOL(find_random_bit);
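find_random_bit() composes two existing primitives: bitmap_weight() counts the candidates and find_nth_bit() indexes a uniformly chosen one, so every set bit is returned with equal probability. A hedged usage sketch; pick_random_busy_slot() is an invented caller, not part of the patch:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/find.h>

static int pick_random_busy_slot(const unsigned long *busy, unsigned long nslots)
{
	/* find_random_bit() returns >= nslots when no bit is set */
	unsigned long bit = find_random_bit(busy, nslots);

	return bit < nslots ? (int)bit : -ENOENT;
}

Since the weight and the selection are two separate reads of the bitmap, the caller is expected to hold whatever lock keeps the bitmap stable across the call.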
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index b7f2fa08d9c8..78e16b95d210 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -826,3 +826,23 @@ static int __init kobject_uevent_init(void)
 
 postcore_initcall(kobject_uevent_init);
 #endif
+
+#ifdef CONFIG_UEVENT_HELPER
+static const struct ctl_table uevent_helper_sysctl_table[] = {
+	{
+		.procname	= "hotplug",
+		.data		= &uevent_helper,
+		.maxlen		= UEVENT_HELPER_PATH_LEN,
+		.mode		= 0644,
+		.proc_handler	= proc_dostring,
+	},
+};
+
+static int __init init_uevent_helper_sysctl(void)
+{
+	register_sysctl_init("kernel", uevent_helper_sysctl_table);
+	return 0;
+}
+
+postcore_initcall(init_uevent_helper_sysctl);
+#endif
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index ef66be963798..b4ee2d29d7a9 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1053,7 +1053,7 @@ static inline void mte_set_gap(const struct maple_enode *mn,
  * mas_ascend() - Walk up a level of the tree.
  * @mas: The maple state
  *
- * Sets the @mas->max and @mas->min to the correct values when walking up. This
+ * Sets the @mas->max and @mas->min for the parent node of mas->node. This
  * may cause several levels of walking up to find the correct min and max.
  * May find a dead node which will cause a premature return.
  * Return: 1 on dead node, 0 otherwise
@@ -1098,6 +1098,12 @@ static int mas_ascend(struct ma_state *mas)
 	min = 0;
 	max = ULONG_MAX;
+
+	/*
+	 * !mas->offset implies that parent node min == mas->min.
+	 * mas->offset > 0 implies that we need to walk up to find the
+	 * implied pivot min.
+	 */
 	if (!mas->offset) {
 		min = mas->min;
 		set_min = true;
@@ -4560,15 +4566,12 @@ again:
 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
 		goto retry;
-
 	if (likely(entry))
 		return entry;
 
 	if (!empty) {
-		if (mas->index <= min) {
-			mas->status = ma_underflow;
-			return NULL;
-		}
+		if (mas->index <= min)
+			goto underflow;
 
 		goto again;
 	}
@@ -4930,7 +4933,7 @@ void *mas_walk(struct ma_state *mas)
 {
 	void *entry;
 
-	if (!mas_is_active(mas) || !mas_is_start(mas))
+	if (!mas_is_active(mas) && !mas_is_start(mas))
 		mas->status = ma_start;
 retry:
 	entry = mas_state_walk(mas);
@@ -5659,6 +5662,17 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 }
 EXPORT_SYMBOL_GPL(mas_expected_entries);
 
+static void mas_may_activate(struct ma_state *mas)
+{
+	if (!mas->node) {
+		mas->status = ma_start;
+	} else if (mas->index > mas->max || mas->index < mas->min) {
+		mas->status = ma_start;
+	} else {
+		mas->status = ma_active;
+	}
+}
+
 static bool mas_next_setup(struct ma_state *mas, unsigned long max,
 		void **entry)
 {
@@ -5682,11 +5696,11 @@ static bool mas_next_setup(struct ma_state *mas, unsigned long max,
 		break;
 	case ma_overflow:
 		/* Overflowed before, but the max changed */
-		mas->status = ma_active;
+		mas_may_activate(mas);
 		break;
 	case ma_underflow:
 		/* The user expects the mas to be one before where it is */
-		mas->status = ma_active;
+		mas_may_activate(mas);
 		*entry = mas_walk(mas);
 		if (*entry)
 			return true;
@@ -5807,11 +5821,11 @@ static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry
 		break;
 	case ma_underflow:
 		/* underflowed before but the min changed */
-		mas->status = ma_active;
+		mas_may_activate(mas);
 		break;
 	case ma_overflow:
 		/* User expects mas to be one after where it is */
-		mas->status = ma_active;
+		mas_may_activate(mas);
 		*entry = mas_walk(mas);
 		if (*entry)
 			return true;
@@ -5976,7 +5990,7 @@ static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long m
 			return true;
 		}
 
-		mas->status = ma_active;
+		mas_may_activate(mas);
 		*entry = mas_walk(mas);
 		if (*entry)
 			return true;
@@ -5985,7 +5999,7 @@ static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long m
 		if (unlikely(mas->last >= max))
 			return true;
 
-		mas->status = ma_active;
+		mas_may_activate(mas);
 		*entry = mas_walk(mas);
 		if (*entry)
 			return true;
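mas_may_activate() replaces the blanket "mas->status = ma_active" with a validity check: the cached node is reused only if it exists and still covers mas->index, otherwise the state degrades to ma_start and a fresh walk is forced. A hedged sketch of the resume pattern this hardens; demo_resume_next() is an invented helper operating on a long-lived maple state:

#include <linux/maple_tree.h>

static void *demo_resume_next(struct ma_state *mas)
{
	void *entry;

	mas_lock(mas);
	/*
	 * A previous call may have left the state in ma_overflow or
	 * ma_underflow while the tree was modified in between. The
	 * setup paths above now re-validate the cached node's
	 * [min, max] span via mas_may_activate() instead of blindly
	 * marking the state ma_active.
	 */
	entry = mas_next(mas, ULONG_MAX);
	mas_unlock(mas);

	return entry;
}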
diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
index cf5609b1ca79..a9e6ffcff04b 100644
--- a/lib/ref_tracker.c
+++ b/lib/ref_tracker.c
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/stacktrace.h>
 #include <linux/stackdepot.h>
+#include <linux/seq_file.h>
 
 #define REF_TRACKER_STACK_ENTRIES 16
 #define STACK_BUF_SIZE 1024
@@ -28,6 +29,45 @@ struct ref_tracker_dir_stats {
 	} stacks[];
 };
 
+#ifdef CONFIG_DEBUG_FS
+#include <linux/xarray.h>
+
+/*
+ * ref_tracker_dir_init() is usually called in allocation-safe contexts, but
+ * the same is not true of ref_tracker_dir_exit() which can be called from
+ * anywhere an object is freed. Removing debugfs dentries is a blocking
+ * operation, so we defer that work to the debugfs_reap_worker.
+ *
+ * Each dentry is tracked in the appropriate xarray. When
+ * ref_tracker_dir_exit() is called, its entries in the xarrays are marked and
+ * the workqueue job is scheduled. The worker then runs and deletes any marked
+ * dentries asynchronously.
+ */
+static struct xarray debugfs_dentries;
+static struct xarray debugfs_symlinks;
+static struct work_struct debugfs_reap_worker;
+
+#define REF_TRACKER_DIR_DEAD	XA_MARK_0
+static inline void ref_tracker_debugfs_mark(struct ref_tracker_dir *dir)
+{
+	unsigned long flags;
+
+	xa_lock_irqsave(&debugfs_dentries, flags);
+	__xa_set_mark(&debugfs_dentries, (unsigned long)dir, REF_TRACKER_DIR_DEAD);
+	xa_unlock_irqrestore(&debugfs_dentries, flags);
+
+	xa_lock_irqsave(&debugfs_symlinks, flags);
+	__xa_set_mark(&debugfs_symlinks, (unsigned long)dir, REF_TRACKER_DIR_DEAD);
+	xa_unlock_irqrestore(&debugfs_symlinks, flags);
+
+	schedule_work(&debugfs_reap_worker);
+}
+#else
+static inline void ref_tracker_debugfs_mark(struct ref_tracker_dir *dir)
+{
+}
+#endif
+
 static struct ref_tracker_dir_stats *
 ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
 {
@@ -63,21 +103,39 @@ ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
 }
 
 struct ostream {
+	void __ostream_printf (*func)(struct ostream *stream, char *fmt, ...);
+	char *prefix;
 	char *buf;
+	struct seq_file *seq;
 	int size, used;
 };
 
+static void __ostream_printf pr_ostream_log(struct ostream *stream, char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+}
+
+static void __ostream_printf pr_ostream_buf(struct ostream *stream, char *fmt, ...)
+{
+	int ret, len = stream->size - stream->used;
+	va_list args;
+
+	va_start(args, fmt);
+	ret = vsnprintf(stream->buf + stream->used, len, fmt, args);
+	va_end(args);
+	if (ret > 0)
+		stream->used += min(ret, len);
+}
+
 #define pr_ostream(stream, fmt, args...) \
 ({ \
 	struct ostream *_s = (stream); \
 \
-	if (!_s->buf) { \
-		pr_err(fmt, ##args); \
-	} else { \
-		int ret, len = _s->size - _s->used; \
-		ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
-		_s->used += min(ret, len); \
-	} \
+	_s->func(_s, fmt, ##args); \
 })
 
 static void
@@ -96,8 +154,8 @@ __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
 
 	stats = ref_tracker_get_stats(dir, display_limit);
 	if (IS_ERR(stats)) {
-		pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
-			   dir->name, dir, stats);
+		pr_ostream(s, "%s%s@%p: couldn't get stats, error %pe\n",
+			   s->prefix, dir->class, dir, stats);
 		return;
 	}
 
@@ -107,14 +165,15 @@ __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
 		stack = stats->stacks[i].stack_handle;
 		if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
 			sbuf[0] = 0;
-		pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
-			   stats->stacks[i].count, stats->total, sbuf);
+		pr_ostream(s, "%s%s@%p has %d/%d users at\n%s\n", s->prefix,
+			   dir->class, dir, stats->stacks[i].count,
+			   stats->total, sbuf);
 		skipped -= stats->stacks[i].count;
 	}
 
 	if (skipped)
-		pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
-			   dir->name, dir, skipped, stats->total);
+		pr_ostream(s, "%s%s@%p skipped reports about %d/%d users.\n",
+			   s->prefix, dir->class, dir, skipped, stats->total);
 
 	kfree(sbuf);
 
@@ -124,7 +183,8 @@ __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
 void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
 				  unsigned int display_limit)
 {
-	struct ostream os = {};
+	struct ostream os = { .func = pr_ostream_log,
+			      .prefix = "ref_tracker: " };
 
 	__ref_tracker_dir_pr_ostream(dir, display_limit, &os);
 }
@@ -143,7 +203,10 @@ EXPORT_SYMBOL(ref_tracker_dir_print);
 
 int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
 {
-	struct ostream os = { .buf = buf, .size = size };
+	struct ostream os = { .func = pr_ostream_buf,
+			      .prefix = "ref_tracker: ",
+			      .buf = buf,
+			      .size = size };
 	unsigned long flags;
 
 	spin_lock_irqsave(&dir->lock, flags);
@@ -161,6 +224,11 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
 	bool leak = false;
 
 	dir->dead = true;
+	/*
+	 * The xarray entries must be marked before the dir->lock is taken to
+	 * protect simultaneous debugfs readers.
+	 */
+	ref_tracker_debugfs_mark(dir);
 	spin_lock_irqsave(&dir->lock, flags);
 	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
 		list_del(&tracker->head);
@@ -273,3 +341,194 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ref_tracker_free);
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *ref_tracker_debug_dir = (struct dentry *)-ENOENT;
+
+static void __ostream_printf pr_ostream_seq(struct ostream *stream, char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	seq_vprintf(stream->seq, fmt, args);
+	va_end(args);
+}
+
+static int ref_tracker_dir_seq_print(struct ref_tracker_dir *dir, struct seq_file *seq)
+{
+	struct ostream os = { .func = pr_ostream_seq,
+			      .prefix = "",
+			      .seq = seq };
+
+	__ref_tracker_dir_pr_ostream(dir, 16, &os);
+
+	return os.used;
+}
+
+static int ref_tracker_debugfs_show(struct seq_file *f, void *v)
+{
+	struct ref_tracker_dir *dir = f->private;
+	unsigned long index = (unsigned long)dir;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * "dir" may not exist at this point if ref_tracker_dir_exit() has
+	 * already been called. Take care not to dereference it until its
+	 * legitimacy is established.
+	 *
+	 * The xa_lock is necessary to ensure that "dir" doesn't disappear
+	 * before its lock can be taken. If it's in the hash and not marked
+	 * dead, then it's safe to take dir->lock which prevents
+	 * ref_tracker_dir_exit() from completing. Once the dir->lock is
+	 * acquired, the xa_lock can be released. All of this must be IRQ-safe.
+	 */
+	xa_lock_irqsave(&debugfs_dentries, flags);
+	if (!xa_load(&debugfs_dentries, index) ||
+	    xa_get_mark(&debugfs_dentries, index, REF_TRACKER_DIR_DEAD)) {
+		xa_unlock_irqrestore(&debugfs_dentries, flags);
+		return -ENODATA;
+	}
+
+	spin_lock(&dir->lock);
+	xa_unlock(&debugfs_dentries);
+	ret = ref_tracker_dir_seq_print(dir, f);
+	spin_unlock_irqrestore(&dir->lock, flags);
+	return ret;
+}
+
+static int ref_tracker_debugfs_open(struct inode *inode, struct file *filp)
+{
+	struct ref_tracker_dir *dir = inode->i_private;
+
+	return single_open(filp, ref_tracker_debugfs_show, dir);
+}
+
+static const struct file_operations ref_tracker_debugfs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ref_tracker_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/**
+ * ref_tracker_dir_debugfs - create debugfs file for ref_tracker_dir
+ * @dir: ref_tracker_dir to be associated with debugfs file
+ *
+ * In most cases, a debugfs file will be created automatically for every
+ * ref_tracker_dir. If the object was created before debugfs is brought up
+ * then that may fail. In those cases, it is safe to call this at a later
+ * time to create the file.
+ */
+void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+	char name[NAME_MAX + 1];
+	struct dentry *dentry;
+	int ret;
+
+	/* No-op if already created */
+	dentry = xa_load(&debugfs_dentries, (unsigned long)dir);
+	if (dentry && !xa_is_err(dentry))
+		return;
+
+	ret = snprintf(name, sizeof(name), "%s@%px", dir->class, dir);
+	name[sizeof(name) - 1] = '\0';
+
+	if (ret < sizeof(name)) {
+		dentry = debugfs_create_file(name, S_IFREG | 0400,
+					     ref_tracker_debug_dir, dir,
+					     &ref_tracker_debugfs_fops);
+		if (!IS_ERR(dentry)) {
+			void *old;
+
+			old = xa_store_irq(&debugfs_dentries, (unsigned long)dir,
+					   dentry, GFP_KERNEL);
+			if (xa_is_err(old))
+				debugfs_remove(dentry);
+			else
+				WARN_ON_ONCE(old);
+		}
+	}
+}
+EXPORT_SYMBOL(ref_tracker_dir_debugfs);
+
+void __ostream_printf ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
+{
+	char name[NAME_MAX + 1];
+	struct dentry *symlink, *dentry;
+	va_list args;
+	int ret;
+
+	symlink = xa_load(&debugfs_symlinks, (unsigned long)dir);
+	dentry = xa_load(&debugfs_dentries, (unsigned long)dir);
+
+	/* Already created? */
+	if (symlink && !xa_is_err(symlink))
+		return;
+
+	if (!dentry || xa_is_err(dentry))
+		return;
+
+	va_start(args, fmt);
+	ret = vsnprintf(name, sizeof(name), fmt, args);
+	va_end(args);
+	name[sizeof(name) - 1] = '\0';
+
+	if (ret < sizeof(name)) {
+		symlink = debugfs_create_symlink(name, ref_tracker_debug_dir,
+						 dentry->d_name.name);
+		if (!IS_ERR(symlink)) {
+			void *old;
+
+			old = xa_store_irq(&debugfs_symlinks, (unsigned long)dir,
+					   symlink, GFP_KERNEL);
+			if (xa_is_err(old))
+				debugfs_remove(symlink);
+			else
+				WARN_ON_ONCE(old);
+		}
+	}
+}
+EXPORT_SYMBOL(ref_tracker_dir_symlink);
+
+static void debugfs_reap_work(struct work_struct *work)
+{
+	struct dentry *dentry;
+	unsigned long index;
+	bool reaped;
+
+	do {
+		reaped = false;
+		xa_for_each_marked(&debugfs_symlinks, index, dentry, REF_TRACKER_DIR_DEAD) {
+			xa_erase_irq(&debugfs_symlinks, index);
+			debugfs_remove(dentry);
+			reaped = true;
+		}
+		xa_for_each_marked(&debugfs_dentries, index, dentry, REF_TRACKER_DIR_DEAD) {
+			xa_erase_irq(&debugfs_dentries, index);
+			debugfs_remove(dentry);
+			reaped = true;
+		}
+	} while (reaped);
+}
+
+static int __init ref_tracker_debugfs_postcore_init(void)
+{
+	INIT_WORK(&debugfs_reap_worker, debugfs_reap_work);
+	xa_init_flags(&debugfs_dentries, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&debugfs_symlinks, XA_FLAGS_LOCK_IRQ);
+	return 0;
+}
+postcore_initcall(ref_tracker_debugfs_postcore_init);
+
+static int __init ref_tracker_debugfs_late_init(void)
+{
+	ref_tracker_debug_dir = debugfs_create_dir("ref_tracker", NULL);
+	return 0;
+}
+late_initcall(ref_tracker_debugfs_late_init);
+#endif /* CONFIG_DEBUG_FS */
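The xarray-mark-plus-workqueue scheme above is a general recipe for deferring blocking cleanup (here debugfs_remove()) out of free paths that may run in atomic context. A condensed, hedged sketch of the same recipe with invented names (demo_objs, DEMO_DEAD, demo_reap_work), not code from the patch:

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(demo_objs, XA_FLAGS_LOCK_IRQ);
#define DEMO_DEAD XA_MARK_0

static void demo_reap(struct work_struct *work)
{
	unsigned long index;
	void *obj;

	/* Process context: blocking teardown is allowed here */
	xa_for_each_marked(&demo_objs, index, obj, DEMO_DEAD) {
		xa_erase_irq(&demo_objs, index);
		kfree(obj);	/* stand-in for e.g. debugfs_remove() */
	}
}
static DECLARE_WORK(demo_reap_work, demo_reap);

/* Callable from any context, including IRQ: just mark and kick the worker */
static void demo_mark_dead(unsigned long index)
{
	unsigned long flags;

	xa_lock_irqsave(&demo_objs, flags);
	__xa_set_mark(&demo_objs, index, DEMO_DEAD);
	xa_unlock_irqrestore(&demo_objs, flags);

	schedule_work(&demo_reap_work);
}

ref_tracker additionally re-checks the mark under xa_lock in its ->show() callback, so a reader never dereferences a dir whose teardown has already begun.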
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 5b144bc5c4ec..761725bc713c 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -330,7 +330,7 @@ static int dmirror_fault(struct dmirror *dmirror, unsigned long start,
 {
 	struct mm_struct *mm = dmirror->notifier.mm;
 	unsigned long addr;
-	unsigned long pfns[64];
+	unsigned long pfns[32];
 	struct hmm_range range = {
 		.notifier = &dmirror->notifier,
 		.hmm_pfns = pfns,
@@ -879,8 +879,8 @@ static int dmirror_migrate_to_system(struct dmirror *dmirror,
 	unsigned long size = cmd->npages << PAGE_SHIFT;
 	struct mm_struct *mm = dmirror->notifier.mm;
 	struct vm_area_struct *vma;
-	unsigned long src_pfns[64] = { 0 };
-	unsigned long dst_pfns[64] = { 0 };
+	unsigned long src_pfns[32] = { 0 };
+	unsigned long dst_pfns[32] = { 0 };
 	struct migrate_vma args = { 0 };
 	unsigned long next;
 	int ret;
@@ -939,8 +939,8 @@ static int dmirror_migrate_to_device(struct dmirror *dmirror,
 	unsigned long size = cmd->npages << PAGE_SHIFT;
 	struct mm_struct *mm = dmirror->notifier.mm;
 	struct vm_area_struct *vma;
-	unsigned long src_pfns[64] = { 0 };
-	unsigned long dst_pfns[64] = { 0 };
+	unsigned long src_pfns[32] = { 0 };
+	unsigned long dst_pfns[32] = { 0 };
 	struct dmirror_bounce bounce;
 	struct migrate_vma args = { 0 };
 	unsigned long next;
@@ -1144,8 +1144,8 @@ static int dmirror_snapshot(struct dmirror *dmirror,
 	unsigned long size = cmd->npages << PAGE_SHIFT;
 	unsigned long addr;
 	unsigned long next;
-	unsigned long pfns[64];
-	unsigned char perm[64];
+	unsigned long pfns[32];
+	unsigned char perm[32];
 	char __user *uptr;
 	struct hmm_range range = {
 		.hmm_pfns = pfns,
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 13e2a10d7554..cb3936595b0d 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -3177,6 +3177,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	void *entry, *ptr = (void *) 0x1234500;
 	void *ptr2 = &ptr;
 	void *ptr3 = &ptr2;
+	unsigned long index;
 
 	/* Check MAS_ROOT First */
 	mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL);
@@ -3707,6 +3708,37 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, !mas_is_active(&mas));
 	mas_unlock(&mas);
+	mtree_destroy(mt);
+
+	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+	mas_lock(&mas);
+	for (int count = 0; count < 30; count++) {
+		mas_set(&mas, count);
+		mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+	}
+
+	/* Ensure mas_find works with MA_UNDERFLOW */
+	mas_set(&mas, 0);
+	entry = mas_walk(&mas);
+	mas_set(&mas, 0);
+	mas_prev(&mas, 0);
+	MT_BUG_ON(mt, mas.status != ma_underflow);
+	MT_BUG_ON(mt, mas_find(&mas, ULONG_MAX) != entry);
+
+	/* Restore active on mas_next */
+	entry = mas_next(&mas, ULONG_MAX);
+	index = mas.index;
+	mas_prev(&mas, index);
+	MT_BUG_ON(mt, mas.status != ma_underflow);
+	MT_BUG_ON(mt, mas_next(&mas, ULONG_MAX) != entry);
+
+	/* Ensure overflow -> active works */
+	mas_prev(&mas, 0);
+	mas_next(&mas, index - 1);
+	MT_BUG_ON(mt, mas.status != ma_overflow);
+	MT_BUG_ON(mt, mas_next(&mas, ULONG_MAX) != entry);
+
+	mas_unlock(&mas);
 }
 
 static noinline void __init alloc_cyclic_testing(struct maple_tree *mt)
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index 222b39fc2629..ce5c4c36a084 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -908,50 +908,22 @@ static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
 	return err;
 }
 
-static int test_hints_case(const struct hints_case *hints_case)
+static int test_hints_case2(const struct hints_case *hints_case,
+			    struct objagg_hints *hints, struct objagg *objagg)
 {
 	struct objagg_obj *objagg_obj;
-	struct objagg_hints *hints;
 	struct world world2 = {};
-	struct world world = {};
 	struct objagg *objagg2;
-	struct objagg *objagg;
 	const char *errmsg;
 	int i;
 	int err;
 
-	objagg = objagg_create(&delta_ops, NULL, &world);
-	if (IS_ERR(objagg))
-		return PTR_ERR(objagg);
-
-	for (i = 0; i < hints_case->key_ids_count; i++) {
-		objagg_obj = world_obj_get(&world, objagg,
-					   hints_case->key_ids[i]);
-		if (IS_ERR(objagg_obj)) {
-			err = PTR_ERR(objagg_obj);
-			goto err_world_obj_get;
-		}
-	}
-
-	pr_debug_stats(objagg);
-	err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg);
-	if (err) {
-		pr_err("Stats: %s\n", errmsg);
-		goto err_check_expect_stats;
-	}
-
-	hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
-	if (IS_ERR(hints)) {
-		err = PTR_ERR(hints);
-		goto err_hints_get;
-	}
-
 	pr_debug_hints_stats(hints);
 	err = check_expect_hints_stats(hints, &hints_case->expect_stats_hints,
 				       &errmsg);
 	if (err) {
 		pr_err("Hints stats: %s\n", errmsg);
-		goto err_check_expect_hints_stats;
+		return err;
 	}
 
 	objagg2 = objagg_create(&delta_ops, hints, &world2);
err_hints_get; - } - pr_debug_hints_stats(hints); err = check_expect_hints_stats(hints, &hints_case->expect_stats_hints, &errmsg); if (err) { pr_err("Hints stats: %s\n", errmsg); - goto err_check_expect_hints_stats; + return err; } objagg2 = objagg_create(&delta_ops, hints, &world2); @@ -983,7 +955,48 @@ err_world2_obj_get: world_obj_put(&world2, objagg, hints_case->key_ids[i]); i = hints_case->key_ids_count; objagg_destroy(objagg2); -err_check_expect_hints_stats: + + return err; +} + +static int test_hints_case(const struct hints_case *hints_case) +{ + struct objagg_obj *objagg_obj; + struct objagg_hints *hints; + struct world world = {}; + struct objagg *objagg; + const char *errmsg; + int i; + int err; + + objagg = objagg_create(&delta_ops, NULL, &world); + if (IS_ERR(objagg)) + return PTR_ERR(objagg); + + for (i = 0; i < hints_case->key_ids_count; i++) { + objagg_obj = world_obj_get(&world, objagg, + hints_case->key_ids[i]); + if (IS_ERR(objagg_obj)) { + err = PTR_ERR(objagg_obj); + goto err_world_obj_get; + } + } + + pr_debug_stats(objagg); + err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg); + if (err) { + pr_err("Stats: %s\n", errmsg); + goto err_check_expect_stats; + } + + hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY); + if (IS_ERR(hints)) { + err = PTR_ERR(hints); + goto err_hints_get; + } + + err = test_hints_case2(hints_case, hints, objagg); + objagg_hints_put(hints); err_hints_get: err_check_expect_stats: diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 1b0b59549aaf..2815658ccc37 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -41,7 +41,7 @@ __param(int, nr_pages, 0, __param(bool, use_huge, false, "Use vmalloc_huge in fix_size_alloc_test"); -__param(int, run_test_mask, INT_MAX, +__param(int, run_test_mask, 7, "Set tests specified in the mask.\n\n" "\t\tid: 1, name: fix_size_alloc_test\n" "\t\tid: 2, name: full_fit_alloc_test\n" @@ -396,25 +396,27 @@ cleanup: struct test_case_desc { const char *test_name; int (*test_func)(void); + bool xfail; }; static struct test_case_desc test_case_array[] = { - { "fix_size_alloc_test", fix_size_alloc_test }, - { "full_fit_alloc_test", full_fit_alloc_test }, - { "long_busy_list_alloc_test", long_busy_list_alloc_test }, - { "random_size_alloc_test", random_size_alloc_test }, - { "fix_align_alloc_test", fix_align_alloc_test }, - { "random_size_align_alloc_test", random_size_align_alloc_test }, - { "align_shift_alloc_test", align_shift_alloc_test }, - { "pcpu_alloc_test", pcpu_alloc_test }, - { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test }, - { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test }, - { "vm_map_ram_test", vm_map_ram_test }, + { "fix_size_alloc_test", fix_size_alloc_test, }, + { "full_fit_alloc_test", full_fit_alloc_test, }, + { "long_busy_list_alloc_test", long_busy_list_alloc_test, }, + { "random_size_alloc_test", random_size_alloc_test, }, + { "fix_align_alloc_test", fix_align_alloc_test, }, + { "random_size_align_alloc_test", random_size_align_alloc_test, }, + { "align_shift_alloc_test", align_shift_alloc_test, true }, + { "pcpu_alloc_test", pcpu_alloc_test, }, + { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test, }, + { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test, }, + { "vm_map_ram_test", vm_map_ram_test, }, /* Add a new test case here. 
diff --git a/lib/tests/test_bits.c b/lib/tests/test_bits.c
index 47325b41515f..ab88e50d2edf 100644
--- a/lib/tests/test_bits.c
+++ b/lib/tests/test_bits.c
@@ -26,6 +26,23 @@ static_assert(assert_type(u16, GENMASK_U16(15, 0)) == U16_MAX);
 static_assert(assert_type(u32, GENMASK_U32(31, 0)) == U32_MAX);
 static_assert(assert_type(u64, GENMASK_U64(63, 0)) == U64_MAX);
 
+/* FIXME: add a test case written in asm for GENMASK() and GENMASK_ULL() */
+
+static void __genmask_test(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, 1ul, __GENMASK(0, 0));
+	KUNIT_EXPECT_EQ(test, 3ul, __GENMASK(1, 0));
+	KUNIT_EXPECT_EQ(test, 6ul, __GENMASK(2, 1));
+	KUNIT_EXPECT_EQ(test, 0xFFFFFFFFul, __GENMASK(31, 0));
+}
+
+static void __genmask_ull_test(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, 1ull, __GENMASK_ULL(0, 0));
+	KUNIT_EXPECT_EQ(test, 3ull, __GENMASK_ULL(1, 0));
+	KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, __GENMASK_ULL(39, 21));
+	KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, __GENMASK_ULL(63, 0));
+}
 
 static void genmask_test(struct kunit *test)
 {
@@ -123,6 +140,8 @@ static void genmask_input_check_test(struct kunit *test)
 
 static struct kunit_case bits_test_cases[] = {
+	KUNIT_CASE(__genmask_test),
+	KUNIT_CASE(__genmask_ull_test),
 	KUNIT_CASE(genmask_test),
 	KUNIT_CASE(genmask_ull_test),
 	KUNIT_CASE(genmask_u128_test),
diff --git a/lib/xarray.c b/lib/xarray.c
index 76dde3a1cacf..ae3d80f4b4ee 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1910,6 +1910,7 @@ EXPORT_SYMBOL(xa_store_range);
  * @xas: XArray operation state.
  *
  * Called after xas_load, the xas should not be in an error state.
+ * The xas should not be pointing to a sibling entry.
  *
  * Return: A number between 0 and 63 indicating the order of the entry.
  */
@@ -1920,6 +1921,8 @@ int xas_get_order(struct xa_state *xas)
 	if (!xas->xa_node)
 		return 0;
 
+	XA_NODE_BUG_ON(xas->xa_node, xa_is_sibling(xa_entry(xas->xa,
+			xas->xa_node, xas->xa_offset)));
 	for (;;) {
 		unsigned int slot = xas->xa_offset + (1 << order);
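The xarray.c hunk tightens the xas_get_order() contract: it must follow xas_load() and must not sit on a sibling slot. A hedged sketch of a conforming caller; demo_entry_order() is an invented helper, not part of the patch:

#include <linux/xarray.h>

static int demo_entry_order(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	int order;

	rcu_read_lock();
	/*
	 * xas_load() lands on the canonical slot of a multi-order entry,
	 * so the new sibling-entry assertion cannot fire here.
	 */
	xas_load(&xas);
	order = xas_get_order(&xas);	/* 0 for a plain order-0 entry */
	rcu_read_unlock();

	return order;
}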