Diffstat (limited to 'drivers/android')
 drivers/android/Kconfig                    |  15
 drivers/android/Makefile                   |   2
 drivers/android/binder.c                   |  71
 drivers/android/binder_alloc.c             |  53
 drivers/android/binder_alloc.h             |  22
 drivers/android/binder_alloc_selftest.c    | 306
 drivers/android/binder_internal.h          |   6
 drivers/android/binder_trace.h             |  21
 drivers/android/binderfs.c                 |  20
 drivers/android/tests/.kunitconfig         |   7
 drivers/android/tests/Makefile             |   6
 drivers/android/tests/binder_alloc_kunit.c | 572
 12 files changed, 664 insertions(+), 437 deletions(-)
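Note: since the new suite ships its own .kunitconfig under drivers/android/tests/, it can be run on UML with the standard KUnit wrapper from the top of a kernel tree (a hedged example; the kunit.py runner and its --kunitconfig flag are standard KUnit tooling, not part of this diff):

    ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/android/tests

Because the new Kconfig entry is tristate, the tests should also be buildable as a module and loadable on a running kernel (presumably via modprobe binder_alloc_kunit) without interfering with the in-use binder allocator.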
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 07aa8ae0a058..5b3b8041f827 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -37,14 +37,15 @@ config ANDROID_BINDER_DEVICES
 	  created. Each binder device has its own context manager, and is
 	  therefore logically separated from the other devices.
 
-config ANDROID_BINDER_IPC_SELFTEST
-	bool "Android Binder IPC Driver Selftest"
-	depends on ANDROID_BINDER_IPC
+config ANDROID_BINDER_ALLOC_KUNIT_TEST
+	tristate "KUnit Tests for Android Binder Alloc" if !KUNIT_ALL_TESTS
+	depends on ANDROID_BINDER_IPC && KUNIT
+	default KUNIT_ALL_TESTS
 	help
-	  This feature allows binder selftest to run.
+	  This feature builds the binder alloc KUnit tests.
 
-	  Binder selftest checks the allocation and free of binder buffers
-	  exhaustively with combinations of various buffer sizes and
-	  alignments.
+	  Each test case runs using a pared-down binder_alloc struct and
+	  test-specific freelist, which allows this KUnit module to be loaded
+	  for testing without interfering with a running system.
 
 endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c9d3d0c99c25..c5d47be0276c 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -3,4 +3,4 @@ ccflags-y += -I$(src)			# needed for trace events
 
 obj-$(CONFIG_ANDROID_BINDERFS)		+= binderfs.o
 obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o
-obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
+obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += tests/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c463ca4a8fff..312b462e349d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -68,6 +68,8 @@
 #include <linux/sizes.h>
 #include <linux/ktime.h>
 
+#include <kunit/visibility.h>
+
 #include <uapi/linux/android/binder.h>
 
 #include <linux/cacheflush.h>
@@ -1585,11 +1587,10 @@ static struct binder_thread *binder_get_txn_from(
 		struct binder_transaction *t)
 {
 	struct binder_thread *from;
 
-	spin_lock(&t->lock);
+	guard(spinlock)(&t->lock);
 	from = t->from;
 	if (from)
 		atomic_inc(&from->tmp_ref);
-	spin_unlock(&t->lock);
 	return from;
 }
@@ -3144,10 +3145,8 @@ static void binder_transaction(struct binder_proc *proc,
 	}
 	if (!target_node) {
 		binder_txn_error("%d:%d cannot find target node\n",
-			thread->pid, proc->pid);
-		/*
-		 * return_error is set above
-		 */
+			proc->pid, thread->pid);
+		/* return_error is set above */
 		return_error_param = -EINVAL;
 		return_error_line = __LINE__;
 		goto err_dead_binder;
@@ -5384,10 +5383,9 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
 	void __user *ubuf = (void __user *)arg;
 	struct binder_write_read bwr;
 
-	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
-		ret = -EFAULT;
-		goto out;
-	}
+	if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
+		return -EFAULT;
+
 	binder_debug(BINDER_DEBUG_READ_WRITE,
 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
 		     proc->pid, thread->pid,
@@ -5402,8 +5400,6 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
 		trace_binder_write_done(ret);
 		if (ret < 0) {
 			bwr.read_consumed = 0;
-			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
-				ret = -EFAULT;
 			goto out;
 		}
 	}
@@ -5417,22 +5413,17 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
 		if (!binder_worklist_empty_ilocked(&proc->todo))
 			binder_wakeup_proc_ilocked(proc);
 		binder_inner_proc_unlock(proc);
-		if (ret < 0) {
-			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
-				ret = -EFAULT;
+		if (ret < 0)
 			goto out;
-		}
 	}
 	binder_debug(BINDER_DEBUG_READ_WRITE,
 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
 		     proc->pid, thread->pid,
 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
-	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
-		ret = -EFAULT;
-		goto out;
-	}
 out:
+	if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+		ret = -EFAULT;
 	return ret;
 }
@@ -5445,32 +5436,28 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
 	struct binder_node *new_node;
 	kuid_t curr_euid = current_euid();
 
-	mutex_lock(&context->context_mgr_node_lock);
+	guard(mutex)(&context->context_mgr_node_lock);
 	if (context->binder_context_mgr_node) {
 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
-		ret = -EBUSY;
-		goto out;
+		return -EBUSY;
 	}
 	ret = security_binder_set_context_mgr(proc->cred);
 	if (ret < 0)
-		goto out;
+		return ret;
 	if (uid_valid(context->binder_context_mgr_uid)) {
 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
 			       from_kuid(&init_user_ns, curr_euid),
 			       from_kuid(&init_user_ns,
 					 context->binder_context_mgr_uid));
-			ret = -EPERM;
-			goto out;
+			return -EPERM;
 		}
 	} else {
 		context->binder_context_mgr_uid = curr_euid;
 	}
 	new_node = binder_new_node(proc, fbo);
-	if (!new_node) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!new_node)
+		return -ENOMEM;
 	binder_node_lock(new_node);
 	new_node->local_weak_refs++;
 	new_node->local_strong_refs++;
@@ -5479,8 +5466,6 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
 	context->binder_context_mgr_node = new_node;
 	binder_node_unlock(new_node);
 	binder_put_node(new_node);
-out:
-	mutex_unlock(&context->context_mgr_node_lock);
 	return ret;
 }
@@ -5716,11 +5701,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	struct binder_thread *thread;
 	void __user *ubuf = (void __user *)arg;
 
-	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
-			proc->pid, current->pid, cmd, arg);*/
-
-	binder_selftest_alloc(&proc->alloc);
-
 	trace_binder_ioctl(cmd, arg);
 
 	ret = wait_event_interruptible(binder_user_error_wait,
 				       binder_stop_on_user_error < 2);
@@ -5956,10 +5936,11 @@ static void binder_vma_close(struct vm_area_struct *vma)
 	binder_alloc_vma_close(&proc->alloc);
 }
 
-static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
+VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
 }
+EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
 
 static const struct vm_operations_struct binder_vm_ops = {
 	.open = binder_vma_open,
@@ -6128,7 +6109,7 @@ static int binder_release(struct inode *nodp, struct file *filp)
 	debugfs_remove(proc->debugfs_entry);
 
 	if (proc->binderfs_entry) {
-		binderfs_remove_file(proc->binderfs_entry);
+		simple_recursive_removal(proc->binderfs_entry, NULL);
 		proc->binderfs_entry = NULL;
 	}
 
@@ -6322,14 +6303,13 @@ static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
 static void
 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
 {
-	mutex_lock(&binder_deferred_lock);
+	guard(mutex)(&binder_deferred_lock);
 	proc->deferred_work |= defer;
 	if (hlist_unhashed(&proc->deferred_work_node)) {
 		hlist_add_head(&proc->deferred_work_node,
 			       &binder_deferred_list);
 		schedule_work(&binder_deferred_work);
 	}
-	mutex_unlock(&binder_deferred_lock);
 }
 
 static void print_binder_transaction_ilocked(struct seq_file *m,
@@ -6871,14 +6851,13 @@ static int proc_show(struct seq_file *m, void *unused)
 	struct binder_proc *itr;
 	int pid = (unsigned long)m->private;
 
-	mutex_lock(&binder_procs_lock);
+	guard(mutex)(&binder_procs_lock);
 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
 		if (itr->pid == pid) {
 			seq_puts(m, "binder proc state:\n");
 			print_binder_proc(m, itr, true, false);
 		}
 	}
-	mutex_unlock(&binder_procs_lock);
 
 	return 0;
 }
@@ -6996,16 +6975,14 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
 
 void binder_add_device(struct binder_device *device)
 {
-	spin_lock(&binder_devices_lock);
+	guard(spinlock)(&binder_devices_lock);
 	hlist_add_head(&device->hlist, &binder_devices);
-	spin_unlock(&binder_devices_lock);
 }
 
 void binder_remove_device(struct binder_device *device)
 {
-	spin_lock(&binder_devices_lock);
+	guard(spinlock)(&binder_devices_lock);
 	hlist_del_init(&device->hlist);
-	spin_unlock(&binder_devices_lock);
 }
 
 static int __init init_binder_device(const char *name)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index fcfaf1b899c8..979c96b74cad 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -23,10 +23,11 @@
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
 #include <linux/sizes.h>
+#include <kunit/visibility.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
-struct list_lru binder_freelist;
+static struct list_lru binder_freelist;
 
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
@@ -57,13 +58,14 @@ static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
 	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
 }
 
-static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
-				       struct binder_buffer *buffer)
+VISIBLE_IF_KUNIT size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+						 struct binder_buffer *buffer)
 {
 	if (list_is_last(&buffer->entry, &alloc->buffers))
 		return alloc->vm_start + alloc->buffer_size - buffer->user_data;
 	return binder_buffer_next(buffer)->user_data - buffer->user_data;
 }
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_buffer_size);
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
 				      struct binder_buffer *new_buffer)
@@ -167,12 +169,8 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 						   unsigned long user_ptr)
 {
-	struct binder_buffer *buffer;
-
-	mutex_lock(&alloc->mutex);
-	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
-	mutex_unlock(&alloc->mutex);
-	return buffer;
+	guard(mutex)(&alloc->mutex);
+	return binder_alloc_prepare_to_free_locked(alloc, user_ptr);
 }
 
 static inline void
@@ -210,7 +208,7 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
 
 		trace_binder_free_lru_start(alloc, index);
 
-		ret = list_lru_add(&binder_freelist,
+		ret = list_lru_add(alloc->freelist,
 				   page_to_lru(page),
 				   page_to_nid(page),
 				   NULL);
@@ -409,7 +407,7 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
 		if (page) {
 			trace_binder_alloc_lru_start(alloc, index);
 
-			on_lru = list_lru_del(&binder_freelist,
+			on_lru = list_lru_del(alloc->freelist,
 					      page_to_lru(page),
 					      page_to_nid(page),
 					      NULL);
@@ -699,6 +697,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 out:
 	return buffer;
 }
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_new_buf);
 
 static unsigned long buffer_start_page(struct binder_buffer *buffer)
 {
@@ -877,6 +876,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 	binder_free_buf_locked(alloc, buffer);
 	mutex_unlock(&alloc->mutex);
 }
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_buf);
 
 /**
  * binder_alloc_mmap_handler() - map virtual address space for proc
@@ -959,7 +959,7 @@ err_invalid_mm:
 	       failure_string, ret);
 	return ret;
 }
-
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_mmap_handler);
 
 void binder_alloc_deferred_release(struct binder_alloc *alloc)
 {
@@ -1007,7 +1007,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		if (!page)
 			continue;
 
-		on_lru = list_lru_del(&binder_freelist,
+		on_lru = list_lru_del(alloc->freelist,
 				      page_to_lru(page),
 				      page_to_nid(page),
 				      NULL);
@@ -1028,6 +1028,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		     "%s: %d buffers %d, pages %d\n",
 		     __func__, alloc->pid, buffers, page_count);
 }
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_deferred_release);
 
 /**
  * binder_alloc_print_allocated() - print buffer info
@@ -1043,7 +1044,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
 	struct binder_buffer *buffer;
 	struct rb_node *n;
 
-	mutex_lock(&alloc->mutex);
+	guard(mutex)(&alloc->mutex);
 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
@@ -1053,7 +1054,6 @@ void binder_alloc_print_allocated(struct seq_file *m,
 			   buffer->extra_buffers_size,
 			   buffer->transaction ? "active" : "delivered");
 	}
-	mutex_unlock(&alloc->mutex);
 }
 
 /**
@@ -1102,10 +1102,9 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
 	struct rb_node *n;
 	int count = 0;
 
-	mutex_lock(&alloc->mutex);
+	guard(mutex)(&alloc->mutex);
 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
 		count++;
-	mutex_unlock(&alloc->mutex);
 	return count;
 }
@@ -1122,6 +1121,7 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
 	binder_alloc_set_mapped(alloc, false);
 }
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_vma_close);
 
 /**
  * binder_alloc_free_page() - shrinker callback to free pages
@@ -1213,6 +1213,7 @@ err_mmap_read_lock_failed:
 err_mmget:
 	return LRU_SKIP;
 }
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_page);
 
 static unsigned long
 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
@@ -1229,6 +1230,18 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 
 static struct shrinker *binder_shrinker;
 
+VISIBLE_IF_KUNIT void __binder_alloc_init(struct binder_alloc *alloc,
+					  struct list_lru *freelist)
+{
+	alloc->pid = current->group_leader->pid;
+	alloc->mm = current->mm;
+	mmgrab(alloc->mm);
+	mutex_init(&alloc->mutex);
+	INIT_LIST_HEAD(&alloc->buffers);
+	alloc->freelist = freelist;
+}
+EXPORT_SYMBOL_IF_KUNIT(__binder_alloc_init);
+
 /**
  * binder_alloc_init() - called by binder_open() for per-proc initialization
  * @alloc: binder_alloc for this proc
@@ -1238,11 +1251,7 @@ static struct shrinker *binder_shrinker;
  */
 void binder_alloc_init(struct binder_alloc *alloc)
 {
-	alloc->pid = current->group_leader->pid;
-	alloc->mm = current->mm;
-	mmgrab(alloc->mm);
-	mutex_init(&alloc->mutex);
-	INIT_LIST_HEAD(&alloc->buffers);
+	__binder_alloc_init(alloc, &binder_freelist);
 }
 
 int binder_alloc_shrinker_init(void)
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index feecd7414241..d6f1f6f2d00e 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -15,7 +15,6 @@
 #include <linux/list_lru.h>
 #include <uapi/linux/android/binder.h>
 
-extern struct list_lru binder_freelist;
 struct binder_transaction;
 
 /**
@@ -91,6 +90,7 @@ static inline struct list_head *page_to_lru(struct page *p)
  * @free_async_space:   VA space available for async buffers. This is
  *                      initialized at mmap time to 1/2 the full VA space
  * @pages:              array of struct page *
+ * @freelist:           lru list to use for free pages (invariant after init)
  * @buffer_size:        size of address space specified via mmap
  * @pid:                pid for associated binder_proc (invariant after init)
  * @pages_high:         high watermark of offset in @pages
@@ -113,6 +113,7 @@ struct binder_alloc {
 	struct rb_root allocated_buffers;
 	size_t free_async_space;
 	struct page **pages;
+	struct list_lru *freelist;
 	size_t buffer_size;
 	int pid;
 	size_t pages_high;
@@ -120,11 +121,6 @@ struct binder_alloc {
 	bool oneway_spam_detected;
 };
 
-#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
-void binder_selftest_alloc(struct binder_alloc *alloc);
-#else
-static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
-#endif
 enum lru_status binder_alloc_free_page(struct list_head *item,
 				       struct list_lru_one *lru,
 				       void *cb_arg);
@@ -160,12 +156,8 @@ void binder_alloc_print_pages(struct seq_file *m,
 static inline size_t
 binder_alloc_get_free_async_space(struct binder_alloc *alloc)
 {
-	size_t free_async_space;
-
-	mutex_lock(&alloc->mutex);
-	free_async_space = alloc->free_async_space;
-	mutex_unlock(&alloc->mutex);
-	return free_async_space;
+	guard(mutex)(&alloc->mutex);
+	return alloc->free_async_space;
 }
 
 unsigned long
@@ -187,5 +179,11 @@ int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
 				  binder_size_t buffer_offset,
 				  size_t bytes);
 
+#if IS_ENABLED(CONFIG_KUNIT)
+void __binder_alloc_init(struct binder_alloc *alloc, struct list_lru *freelist);
+size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+				struct binder_buffer *buffer);
+#endif
+
 #endif /* _LINUX_BINDER_ALLOC_H */
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
deleted file mode 100644
index c88735c54848..000000000000
--- a/drivers/android/binder_alloc_selftest.c
+++ /dev/null
@@ -1,306 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* binder_alloc_selftest.c
- *
- * Android IPC Subsystem
- *
- * Copyright (C) 2017 Google, Inc.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm_types.h>
-#include <linux/err.h>
-#include "binder_alloc.h"
-
-#define BUFFER_NUM 5
-#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
-
-static bool binder_selftest_run = true;
-static int binder_selftest_failures;
-static DEFINE_MUTEX(binder_selftest_lock);
-
-/**
- * enum buf_end_align_type - Page alignment of a buffer
- * end with regard to the end of the previous buffer.
- *
- * In the pictures below, buf2 refers to the buffer we
- * are aligning. buf1 refers to previous buffer by addr.
- * Symbol [ means the start of a buffer, ] means the end
- * of a buffer, and | means page boundaries.
- */
-enum buf_end_align_type {
-	/**
-	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
-	 * the same page as the end of the previous buffer and
-	 * is not page aligned. Examples:
-	 * buf1 ][ buf2 ][ ...
-	 * buf1 ]|[ buf2 ][ ...
-	 */
-	SAME_PAGE_UNALIGNED = 0,
-	/**
-	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
-	 * is not page aligned, the end of this buffer is on the
-	 * same page as the end of the previous buffer and is page
-	 * aligned. When the previous buffer is page aligned, the
-	 * end of this buffer is aligned to the next page boundary.
-	 * Examples:
-	 * buf1 ][ buf2 ]| ...
-	 * buf1 ]|[ buf2 ]| ...
-	 */
-	SAME_PAGE_ALIGNED,
-	/**
-	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
-	 * the page next to the end of the previous buffer and
-	 * is not page aligned. Examples:
-	 * buf1 ][ buf2 | buf2 ][ ...
-	 * buf1 ]|[ buf2 | buf2 ][ ...
-	 */
-	NEXT_PAGE_UNALIGNED,
-	/**
-	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
-	 * the page next to the end of the previous buffer and
-	 * is page aligned. Examples:
-	 * buf1 ][ buf2 | buf2 ]| ...
-	 * buf1 ]|[ buf2 | buf2 ]| ...
-	 */
-	NEXT_PAGE_ALIGNED,
-	/**
-	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
-	 * the page that follows the page after the end of the
-	 * previous buffer and is not page aligned. Examples:
-	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
-	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
-	 */
-	NEXT_NEXT_UNALIGNED,
-	/**
-	 * @LOOP_END: The number of enum values in &buf_end_align_type.
-	 * It is used for controlling loop termination.
-	 */
-	LOOP_END,
-};
-
-static void pr_err_size_seq(size_t *sizes, int *seq)
-{
-	int i;
-
-	pr_err("alloc sizes: ");
-	for (i = 0; i < BUFFER_NUM; i++)
-		pr_cont("[%zu]", sizes[i]);
-	pr_cont("\n");
-	pr_err("free seq: ");
-	for (i = 0; i < BUFFER_NUM; i++)
-		pr_cont("[%d]", seq[i]);
-	pr_cont("\n");
-}
-
-static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
-					 struct binder_buffer *buffer,
-					 size_t size)
-{
-	unsigned long page_addr;
-	unsigned long end;
-	int page_index;
-
-	end = PAGE_ALIGN(buffer->user_data + size);
-	page_addr = buffer->user_data;
-	for (; page_addr < end; page_addr += PAGE_SIZE) {
-		page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
-		if (!alloc->pages[page_index] ||
-		    !list_empty(page_to_lru(alloc->pages[page_index]))) {
-			pr_err("expect alloc but is %s at page index %d\n",
-			       alloc->pages[page_index] ?
-			       "lru" : "free", page_index);
-			return false;
-		}
-	}
-	return true;
-}
-
-static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
-				      struct binder_buffer *buffers[],
-				      size_t *sizes, int *seq)
-{
-	int i;
-
-	for (i = 0; i < BUFFER_NUM; i++) {
-		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
-		if (IS_ERR(buffers[i]) ||
-		    !check_buffer_pages_allocated(alloc, buffers[i],
-						  sizes[i])) {
-			pr_err_size_seq(sizes, seq);
-			binder_selftest_failures++;
-		}
-	}
-}
-
-static void binder_selftest_free_buf(struct binder_alloc *alloc,
-				     struct binder_buffer *buffers[],
-				     size_t *sizes, int *seq, size_t end)
-{
-	int i;
-
-	for (i = 0; i < BUFFER_NUM; i++)
-		binder_alloc_free_buf(alloc, buffers[seq[i]]);
-
-	for (i = 0; i < end / PAGE_SIZE; i++) {
-		/**
-		 * Error message on a free page can be false positive
-		 * if binder shrinker ran during binder_alloc_free_buf
-		 * calls above.
-		 */
-		if (list_empty(page_to_lru(alloc->pages[i]))) {
-			pr_err_size_seq(sizes, seq);
-			pr_err("expect lru but is %s at page index %d\n",
-			       alloc->pages[i] ? "alloc" : "free", i);
-			binder_selftest_failures++;
-		}
-	}
-}
-
-static void binder_selftest_free_page(struct binder_alloc *alloc)
-{
-	int i;
-	unsigned long count;
-
-	while ((count = list_lru_count(&binder_freelist))) {
-		list_lru_walk(&binder_freelist, binder_alloc_free_page,
-			      NULL, count);
-	}
-
-	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
-		if (alloc->pages[i]) {
-			pr_err("expect free but is %s at page index %d\n",
-			       list_empty(page_to_lru(alloc->pages[i])) ?
-			       "alloc" : "lru", i);
-			binder_selftest_failures++;
-		}
-	}
-}
-
-static void binder_selftest_alloc_free(struct binder_alloc *alloc,
-				       size_t *sizes, int *seq, size_t end)
-{
-	struct binder_buffer *buffers[BUFFER_NUM];
-
-	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
-	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
-
-	/* Allocate from lru. */
-	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
-	if (list_lru_count(&binder_freelist))
-		pr_err("lru list should be empty but is not\n");
-
-	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
-	binder_selftest_free_page(alloc);
-}
-
-static bool is_dup(int *seq, int index, int val)
-{
-	int i;
-
-	for (i = 0; i < index; i++) {
-		if (seq[i] == val)
-			return true;
-	}
-	return false;
-}
-
-/* Generate BUFFER_NUM factorial free orders. */
-static void binder_selftest_free_seq(struct binder_alloc *alloc,
-				     size_t *sizes, int *seq,
-				     int index, size_t end)
-{
-	int i;
-
-	if (index == BUFFER_NUM) {
-		binder_selftest_alloc_free(alloc, sizes, seq, end);
-		return;
-	}
-	for (i = 0; i < BUFFER_NUM; i++) {
-		if (is_dup(seq, index, i))
-			continue;
-		seq[index] = i;
-		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
-	}
-}
-
-static void binder_selftest_alloc_size(struct binder_alloc *alloc,
-				       size_t *end_offset)
-{
-	int i;
-	int seq[BUFFER_NUM] = {0};
-	size_t front_sizes[BUFFER_NUM];
-	size_t back_sizes[BUFFER_NUM];
-	size_t last_offset, offset = 0;
-
-	for (i = 0; i < BUFFER_NUM; i++) {
-		last_offset = offset;
-		offset = end_offset[i];
-		front_sizes[i] = offset - last_offset;
-		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
-	}
-	/*
-	 * Buffers share the first or last few pages.
-	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
-	 * we need one giant buffer before getting to the last page.
-	 */
-	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
-	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
-				 end_offset[BUFFER_NUM - 1]);
-	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
-}
-
-static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
-					 size_t *end_offset, int index)
-{
-	int align;
-	size_t end, prev;
-
-	if (index == BUFFER_NUM) {
-		binder_selftest_alloc_size(alloc, end_offset);
-		return;
-	}
-	prev = index == 0 ? 0 : end_offset[index - 1];
-	end = prev;
-
-	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
-
-	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
-		if (align % 2)
-			end = ALIGN(end, PAGE_SIZE);
-		else
-			end += BUFFER_MIN_SIZE;
-		end_offset[index] = end;
-		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
-	}
-}
-
-/**
- * binder_selftest_alloc() - Test alloc and free of buffer pages.
- * @alloc: Pointer to alloc struct.
- *
- * Allocate BUFFER_NUM buffers to cover all page alignment cases,
- * then free them in all orders possible. Check that pages are
- * correctly allocated, put onto lru when buffers are freed, and
- * are freed when binder_alloc_free_page is called.
- */
-void binder_selftest_alloc(struct binder_alloc *alloc)
-{
-	size_t end_offset[BUFFER_NUM];
-
-	if (!binder_selftest_run)
-		return;
-	mutex_lock(&binder_selftest_lock);
-	if (!binder_selftest_run || !alloc->mapped)
-		goto done;
-	pr_info("STARTED\n");
-	binder_selftest_alloc_offset(alloc, end_offset, 0);
-	binder_selftest_run = false;
-	if (binder_selftest_failures > 0)
-		pr_info("%d tests FAILED\n", binder_selftest_failures);
-	else
-		pr_info("PASSED\n");
-
-done:
-	mutex_unlock(&binder_selftest_lock);
-}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 1ba5caf1d88d..8b08976146ba 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -81,7 +81,6 @@ extern bool is_binderfs_device(const struct inode *inode);
 extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
 					   const struct file_operations *fops,
 					   void *data);
-extern void binderfs_remove_file(struct dentry *dentry);
 #else
 static inline bool is_binderfs_device(const struct inode *inode)
 {
@@ -94,7 +93,6 @@ static inline struct dentry *binderfs_create_file(struct dentry *dir,
 {
 	return NULL;
 }
-static inline void binderfs_remove_file(struct dentry *dentry) {}
 #endif
 
 #ifdef CONFIG_ANDROID_BINDERFS
@@ -592,4 +590,8 @@ void binder_add_device(struct binder_device *device);
  */
 void binder_remove_device(struct binder_device *device);
 
+#if IS_ENABLED(CONFIG_KUNIT)
+vm_fault_t binder_vm_fault(struct vm_fault *vmf);
+#endif
+
 #endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 16de1b9e72f7..97a78e5623db 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -34,27 +34,6 @@ TRACE_EVENT(binder_ioctl,
 	TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
 );
 
-DECLARE_EVENT_CLASS(binder_lock_class,
-	TP_PROTO(const char *tag),
-	TP_ARGS(tag),
-	TP_STRUCT__entry(
-		__field(const char *, tag)
-	),
-	TP_fast_assign(
-		__entry->tag = tag;
-	),
-	TP_printk("tag=%s", __entry->tag)
-);
-
-#define DEFINE_BINDER_LOCK_EVENT(name)	\
-DEFINE_EVENT(binder_lock_class, name,	\
-	TP_PROTO(const char *func), \
-	TP_ARGS(func))
-
-DEFINE_BINDER_LOCK_EVENT(binder_lock);
-DEFINE_BINDER_LOCK_EVENT(binder_locked);
-DEFINE_BINDER_LOCK_EVENT(binder_unlock);
-
 DECLARE_EVENT_CLASS(binder_function_return_class,
 	TP_PROTO(int ret),
 	TP_ARGS(ret),
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 024275dbfdd8..0d9d95a7fb60 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -117,7 +117,6 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 	struct dentry *dentry, *root;
 	struct binder_device *device;
 	char *name = NULL;
-	size_t name_len;
 	struct inode *inode = NULL;
 	struct super_block *sb = ref_inode->i_sb;
 	struct binderfs_info *info = sb->s_fs_info;
@@ -161,9 +160,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 	inode->i_gid = info->root_gid;
 
 	req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
-	name_len = strlen(req->name);
-	/* Make sure to include terminating NUL byte */
-	name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
+	name = kstrdup(req->name, GFP_KERNEL);
 	if (!name)
 		goto err;
 
@@ -500,21 +497,6 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent,
 	return dentry;
 }
 
-void binderfs_remove_file(struct dentry *dentry)
-{
-	struct inode *parent_inode;
-
-	parent_inode = d_inode(dentry->d_parent);
-	inode_lock(parent_inode);
-	if (simple_positive(dentry)) {
-		dget(dentry);
-		simple_unlink(parent_inode, dentry);
-		d_delete(dentry);
-		dput(dentry);
-	}
-	inode_unlock(parent_inode);
-}
-
 struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
 				    const struct file_operations *fops,
 				    void *data)
diff --git a/drivers/android/tests/.kunitconfig b/drivers/android/tests/.kunitconfig
new file mode 100644
index 000000000000..39b76bab9d9a
--- /dev/null
+++ b/drivers/android/tests/.kunitconfig
@@ -0,0 +1,7 @@
+#
+# Copyright 2025 Google LLC.
+#
+
+CONFIG_KUNIT=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST=y
diff --git a/drivers/android/tests/Makefile b/drivers/android/tests/Makefile
new file mode 100644
index 000000000000..27268418eb03
--- /dev/null
+++ b/drivers/android/tests/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright 2025 Google LLC.
+#
+
+obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += binder_alloc_kunit.o
diff --git a/drivers/android/tests/binder_alloc_kunit.c b/drivers/android/tests/binder_alloc_kunit.c
new file mode 100644
index 000000000000..9b884d977f76
--- /dev/null
+++ b/drivers/android/tests/binder_alloc_kunit.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for binder allocator code.
+ *
+ * Copyright 2025 Google LLC.
+ * Author: Tiffany Yang <ynaffit@google.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/anon_inodes.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/seq_buf.h>
+#include <linux/sizes.h>
+
+#include "../binder_alloc.h"
+#include "../binder_internal.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+#define BINDER_MMAP_SIZE SZ_128K
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+#define FREESEQ_BUFLEN ((3 * BUFFER_NUM) + 1)
+
+#define ALIGN_TYPE_STRLEN (12)
+
+#define ALIGNMENTS_BUFLEN (((ALIGN_TYPE_STRLEN + 6) * BUFFER_NUM) + 1)
+
+#define PRINT_ALL_CASES (0)
+
+/* 5^5 alignment combinations * 2 places to share pages * 5! free sequences */
+#define TOTAL_EXHAUSTIVE_CASES (3125 * 2 * 120)
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to previous buffer by addr.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+	/**
+	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+	 * the same page as the end of the previous buffer and
+	 * is not page aligned. Examples:
+	 * buf1 ][ buf2 ][ ...
+	 * buf1 ]|[ buf2 ][ ...
+	 */
+	SAME_PAGE_UNALIGNED = 0,
+	/**
+	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+	 * is not page aligned, the end of this buffer is on the
+	 * same page as the end of the previous buffer and is page
+	 * aligned. When the previous buffer is page aligned, the
+	 * end of this buffer is aligned to the next page boundary.
+	 * Examples:
+	 * buf1 ][ buf2 ]| ...
+	 * buf1 ]|[ buf2 ]| ...
+	 */
+	SAME_PAGE_ALIGNED,
+	/**
+	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+	 * the page next to the end of the previous buffer and
+	 * is not page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 ][ ...
+	 * buf1 ]|[ buf2 | buf2 ][ ...
+	 */
+	NEXT_PAGE_UNALIGNED,
+	/**
+	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+	 * the page next to the end of the previous buffer and
+	 * is page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 ]| ...
+	 * buf1 ]|[ buf2 | buf2 ]| ...
+	 */
+	NEXT_PAGE_ALIGNED,
+	/**
+	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+	 * the page that follows the page after the end of the
+	 * previous buffer and is not page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
+	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+	 */
+	NEXT_NEXT_UNALIGNED,
+	/**
+	 * @LOOP_END: The number of enum values in &buf_end_align_type.
+	 * It is used for controlling loop termination.
+	 */
+	LOOP_END,
+};
+
+static const char *const buf_end_align_type_strs[LOOP_END] = {
+	[SAME_PAGE_UNALIGNED] = "SP_UNALIGNED",
+	[SAME_PAGE_ALIGNED] = " SP_ALIGNED ",
+	[NEXT_PAGE_UNALIGNED] = "NP_UNALIGNED",
+	[NEXT_PAGE_ALIGNED] = " NP_ALIGNED ",
+	[NEXT_NEXT_UNALIGNED] = "NN_UNALIGNED",
+};
+
+struct binder_alloc_test_case_info {
+	char alignments[ALIGNMENTS_BUFLEN];
+	struct seq_buf alignments_sb;
+	size_t *buffer_sizes;
+	int *free_sequence;
+	bool front_pages;
+};
+
+static void stringify_free_seq(struct kunit *test, int *seq, struct seq_buf *sb)
+{
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++)
+		seq_buf_printf(sb, "[%d]", seq[i]);
+
+	KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
+}
+
+static void stringify_alignments(struct kunit *test, int *alignments,
+				 struct seq_buf *sb)
+{
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++)
+		seq_buf_printf(sb, "[ %d:%s ]", i,
+			       buf_end_align_type_strs[alignments[i]]);
+
+	KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
+}
+
+static bool check_buffer_pages_allocated(struct kunit *test,
+					 struct binder_alloc *alloc,
+					 struct binder_buffer *buffer,
+					 size_t size)
+{
+	unsigned long page_addr;
+	unsigned long end;
+	int page_index;
+
+	end = PAGE_ALIGN(buffer->user_data + size);
+	page_addr = buffer->user_data;
+	for (; page_addr < end; page_addr += PAGE_SIZE) {
+		page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+		if (!alloc->pages[page_index] ||
+		    !list_empty(page_to_lru(alloc->pages[page_index]))) {
+			kunit_err(test, "expect alloc but is %s at page index %d\n",
+				  alloc->pages[page_index] ?
+				  "lru" : "free", page_index);
+			return false;
+		}
+	}
+	return true;
+}
+
+static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
+						 struct binder_alloc *alloc,
+						 struct binder_buffer *buffers[],
+						 size_t *sizes, int *seq)
+{
+	unsigned long failures = 0;
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++) {
+		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+		if (IS_ERR(buffers[i]) ||
+		    !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i]))
+			failures++;
+	}
+
+	return failures;
+}
+
+static unsigned long binder_alloc_test_free_buf(struct kunit *test,
+						struct binder_alloc *alloc,
+						struct binder_buffer *buffers[],
+						size_t *sizes, int *seq, size_t end)
+{
+	unsigned long failures = 0;
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++)
+		binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+	for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
+		if (list_empty(page_to_lru(alloc->pages[i]))) {
+			kunit_err(test, "expect lru but is %s at page index %d\n",
+				  alloc->pages[i] ? "alloc" : "free", i);
+			failures++;
+		}
+	}
+
+	return failures;
+}
+
+static unsigned long binder_alloc_test_free_page(struct kunit *test,
+						 struct binder_alloc *alloc)
+{
+	unsigned long failures = 0;
+	unsigned long count;
+	int i;
+
+	while ((count = list_lru_count(alloc->freelist))) {
+		list_lru_walk(alloc->freelist, binder_alloc_free_page,
+			      NULL, count);
+	}
+
+	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+		if (alloc->pages[i]) {
+			kunit_err(test, "expect free but is %s at page index %d\n",
+				  list_empty(page_to_lru(alloc->pages[i])) ?
+				  "alloc" : "lru", i);
+			failures++;
+		}
+	}
+
+	return failures;
+}
+
+/* Executes one full test run for the given test case. */
+static bool binder_alloc_test_alloc_free(struct kunit *test,
+					 struct binder_alloc *alloc,
+					 struct binder_alloc_test_case_info *tc,
+					 size_t end)
+{
+	unsigned long pages = PAGE_ALIGN(end) / PAGE_SIZE;
+	struct binder_buffer *buffers[BUFFER_NUM];
+	unsigned long failures;
+	bool failed = false;
+
+	failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
+					       tc->buffer_sizes,
+					       tc->free_sequence);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "Initial allocation failed: %lu/%u buffers with errors",
+			    failures, BUFFER_NUM);
+
+	failures = binder_alloc_test_free_buf(test, alloc, buffers,
+					      tc->buffer_sizes,
+					      tc->free_sequence, end);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "Initial buffers not freed correctly: %lu/%lu pages not on lru list",
+			    failures, pages);
+
+	/* Allocate from lru. */
+	failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
+					       tc->buffer_sizes,
+					       tc->free_sequence);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "Reallocation failed: %lu/%u buffers with errors",
+			    failures, BUFFER_NUM);
+
+	failures = list_lru_count(alloc->freelist);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "lru list should be empty after reallocation but still has %lu pages",
+			    failures);
+
+	failures = binder_alloc_test_free_buf(test, alloc, buffers,
+					      tc->buffer_sizes,
+					      tc->free_sequence, end);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "Reallocated buffers not freed correctly: %lu/%lu pages not on lru list",
+			    failures, pages);
+
+	failures = binder_alloc_test_free_page(test, alloc);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "Failed to clean up allocated pages: %lu/%lu pages still installed",
+			    failures, (alloc->buffer_size / PAGE_SIZE));
+
+	return failed;
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+	int i;
+
+	for (i = 0; i < index; i++) {
+		if (seq[i] == val)
+			return true;
+	}
+	return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
+			  struct binder_alloc_test_case_info *tc,
+			  unsigned long *runs, unsigned long *failures,
+			  int index, size_t end)
+{
+	bool case_failed;
+	int i;
+
+	if (index == BUFFER_NUM) {
+		DECLARE_SEQ_BUF(freeseq_sb, FREESEQ_BUFLEN);
+
+		case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
+		*runs += 1;
+		*failures += case_failed;
+
+		if (case_failed || PRINT_ALL_CASES) {
+			stringify_free_seq(test, tc->free_sequence,
+					   &freeseq_sb);
+			kunit_err(test, "case %lu: [%s] | %s - %s - %s", *runs,
+				  case_failed ? "FAILED" : "PASSED",
+				  tc->front_pages ? "front" : "back ",
+				  seq_buf_str(&tc->alignments_sb),
+				  seq_buf_str(&freeseq_sb));
+		}
+
+		return;
+	}
+	for (i = 0; i < BUFFER_NUM; i++) {
+		if (is_dup(tc->free_sequence, index, i))
+			continue;
+		tc->free_sequence[index] = i;
+		permute_frees(test, alloc, tc, runs, failures, index + 1, end);
+	}
+}
+
+static void gen_buf_sizes(struct kunit *test,
+			  struct binder_alloc *alloc,
+			  struct binder_alloc_test_case_info *tc,
+			  size_t *end_offset, unsigned long *runs,
+			  unsigned long *failures)
+{
+	size_t last_offset, offset = 0;
+	size_t front_sizes[BUFFER_NUM];
+	size_t back_sizes[BUFFER_NUM];
+	int seq[BUFFER_NUM] = {0};
+	int i;
+
+	tc->free_sequence = seq;
+	for (i = 0; i < BUFFER_NUM; i++) {
+		last_offset = offset;
+		offset = end_offset[i];
+		front_sizes[i] = offset - last_offset;
+		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+	}
+	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+
+	/*
+	 * Buffers share the first or last few pages.
+	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+	 * we need one giant buffer before getting to the last page.
+	 */
+	tc->front_pages = true;
+	tc->buffer_sizes = front_sizes;
+	permute_frees(test, alloc, tc, runs, failures, 0,
+		      end_offset[BUFFER_NUM - 1]);
+
+	tc->front_pages = false;
+	tc->buffer_sizes = back_sizes;
+	permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size);
+}
+
+static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
+			    size_t *end_offset, int *alignments,
+			    unsigned long *runs, unsigned long *failures,
+			    int index)
+{
+	size_t end, prev;
+	int align;
+
+	if (index == BUFFER_NUM) {
+		struct binder_alloc_test_case_info tc = {0};
+
+		seq_buf_init(&tc.alignments_sb, tc.alignments,
+			     ALIGNMENTS_BUFLEN);
+		stringify_alignments(test, alignments, &tc.alignments_sb);
+
+		gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
+		return;
+	}
+	prev = index == 0 ? 0 : end_offset[index - 1];
+	end = prev;
+
+	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+		if (align % 2)
+			end = ALIGN(end, PAGE_SIZE);
+		else
+			end += BUFFER_MIN_SIZE;
+		end_offset[index] = end;
+		alignments[index] = align;
+		gen_buf_offsets(test, alloc, end_offset, alignments, runs,
+				failures, index + 1);
+	}
+}
+
+struct binder_alloc_test {
+	struct binder_alloc alloc;
+	struct list_lru binder_test_freelist;
+	struct file *filp;
+	unsigned long mmap_uaddr;
+};
+
+static void binder_alloc_test_init_freelist(struct kunit *test)
+{
+	struct binder_alloc_test *priv = test->priv;
+
+	KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
+			    &priv->binder_test_freelist);
+}
+
+static void binder_alloc_test_mmap(struct kunit *test)
+{
+	struct binder_alloc_test *priv = test->priv;
+	struct binder_alloc *alloc = &priv->alloc;
+	struct binder_buffer *buf;
+	struct rb_node *n;
+
+	KUNIT_EXPECT_EQ(test, alloc->mapped, true);
+	KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);
+
+	n = rb_first(&alloc->allocated_buffers);
+	KUNIT_EXPECT_PTR_EQ(test, n, NULL);
+
+	n = rb_first(&alloc->free_buffers);
+	buf = rb_entry(n, struct binder_buffer, rb_node);
+	KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
+			BINDER_MMAP_SIZE);
+	KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
+}
+
+/**
+ * binder_alloc_exhaustive_test() - Exhaustively test alloc and free of buffer pages.
+ * @test: The test context object.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page() is called.
+ */
+static void binder_alloc_exhaustive_test(struct kunit *test)
+{
+	struct binder_alloc_test *priv = test->priv;
+	size_t end_offset[BUFFER_NUM];
+	int alignments[BUFFER_NUM];
+	unsigned long failures = 0;
+	unsigned long runs = 0;
+
+	gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs,
+			&failures, 0);
+
+	KUNIT_EXPECT_EQ(test, runs, TOTAL_EXHAUSTIVE_CASES);
+	KUNIT_EXPECT_EQ(test, failures, 0);
+}
+
+/* ===== End test cases ===== */
+
+static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
+{
+	struct binder_alloc *alloc = vma->vm_private_data;
+
+	binder_alloc_vma_close(alloc);
+}
+
+static const struct vm_operations_struct binder_alloc_test_vm_ops = {
+	.close = binder_alloc_test_vma_close,
+	.fault = binder_vm_fault,
+};
+
+static int binder_alloc_test_mmap_handler(struct file *filp,
+					  struct vm_area_struct *vma)
+{
+	struct binder_alloc *alloc = filp->private_data;
+
+	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
+
+	vma->vm_ops = &binder_alloc_test_vm_ops;
+	vma->vm_private_data = alloc;
+
+	return binder_alloc_mmap_handler(alloc, vma);
+}
+
+static const struct file_operations binder_alloc_test_fops = {
+	.mmap = binder_alloc_test_mmap_handler,
+};
+
+static int binder_alloc_test_init(struct kunit *test)
+{
+	struct binder_alloc_test *priv;
+	int ret;
+
+	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	test->priv = priv;
+
+	ret = list_lru_init(&priv->binder_test_freelist);
+	if (ret) {
+		kunit_err(test, "Failed to initialize test freelist\n");
+		return ret;
+	}
+
+	/* __binder_alloc_init requires mm to be attached */
+	ret = kunit_attach_mm();
+	if (ret) {
+		kunit_err(test, "Failed to attach mm\n");
+		return ret;
+	}
+	__binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);
+
+	priv->filp = anon_inode_getfile("binder_alloc_kunit",
+					&binder_alloc_test_fops, &priv->alloc,
+					O_RDWR | O_CLOEXEC);
+	if (IS_ERR_OR_NULL(priv->filp)) {
+		kunit_err(test, "Failed to open binder alloc test driver file\n");
+		return priv->filp ? PTR_ERR(priv->filp) : -ENOMEM;
+	}
+
+	priv->mmap_uaddr = kunit_vm_mmap(test, priv->filp, 0, BINDER_MMAP_SIZE,
+					 PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
+					 0);
+	if (!priv->mmap_uaddr) {
+		kunit_err(test, "Could not map the test's transaction memory\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void binder_alloc_test_exit(struct kunit *test)
+{
+	struct binder_alloc_test *priv = test->priv;
+
+	/* Close the backing file to make sure binder_alloc_vma_close runs */
+	if (!IS_ERR_OR_NULL(priv->filp))
+		fput(priv->filp);
+
+	if (priv->alloc.mm)
+		binder_alloc_deferred_release(&priv->alloc);
+
+	/* Make sure freelist is empty */
+	KUNIT_EXPECT_EQ(test, list_lru_count(&priv->binder_test_freelist), 0);
+	list_lru_destroy(&priv->binder_test_freelist);
+}
+
+static struct kunit_case binder_alloc_test_cases[] = {
+	KUNIT_CASE(binder_alloc_test_init_freelist),
+	KUNIT_CASE(binder_alloc_test_mmap),
+	KUNIT_CASE(binder_alloc_exhaustive_test),
+	{}
+};
+
+static struct kunit_suite binder_alloc_test_suite = {
+	.name = "binder_alloc",
+	.test_cases = binder_alloc_test_cases,
+	.init = binder_alloc_test_init,
+	.exit = binder_alloc_test_exit,
+};
+
+kunit_test_suite(binder_alloc_test_suite);
+
+MODULE_AUTHOR("Tiffany Yang <ynaffit@google.com>");
+MODULE_DESCRIPTION("Binder Alloc KUnit tests");
+MODULE_LICENSE("GPL");
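The recurring binder.c change above replaces mutex_lock()/mutex_unlock() and spin_lock()/spin_unlock() pairs with the scope-based guard() helpers from <linux/cleanup.h>, which release the lock automatically when the enclosing scope exits. A minimal self-contained sketch of the pattern (illustration only, not taken from the patch; the binder_demo_* names are hypothetical):

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(binder_demo_lock);
    static int binder_demo_count;

    static int binder_demo_increment(int max)
    {
    	/* The mutex is dropped automatically on every return path
    	 * below, so no common "out:" unlock label is needed. */
    	guard(mutex)(&binder_demo_lock);
    	if (binder_demo_count >= max)
    		return -EBUSY;
    	binder_demo_count++;
    	return 0;
    }

This is why functions such as binder_ioctl_set_ctx_mgr() and binder_alloc_prepare_to_free() could shed their "out:" labels and return directly. The VISIBLE_IF_KUNIT/EXPORT_SYMBOL_IF_KUNIT annotations come from <kunit/visibility.h>: with CONFIG_KUNIT enabled they make the symbol non-static and export it into the EXPORTED_FOR_KUNIT_TESTING namespace imported by the test module; with KUnit disabled they compile back to static/no-ops.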