Diffstat (limited to 'drivers/android/binder.c')
-rw-r--r--  drivers/android/binder.c  |  1152
1 file changed, 856 insertions(+), 296 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 880224ec6abb..535fc881c8da 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -66,11 +66,15 @@ #include <linux/syscalls.h> #include <linux/task_work.h> #include <linux/sizes.h> +#include <linux/ktime.h> + +#include <kunit/visibility.h> #include <uapi/linux/android/binder.h> #include <linux/cacheflush.h> +#include "binder_netlink.h" #include "binder_internal.h" #include "binder_trace.h" @@ -78,6 +82,8 @@ static HLIST_HEAD(binder_deferred_list); static DEFINE_MUTEX(binder_deferred_lock); static HLIST_HEAD(binder_devices); +static DEFINE_SPINLOCK(binder_devices_lock); + static HLIST_HEAD(binder_procs); static DEFINE_MUTEX(binder_procs_lock); @@ -276,12 +282,12 @@ _binder_proc_lock(struct binder_proc *proc, int line) } /** - * binder_proc_unlock() - Release spinlock for given binder_proc - * @proc: struct binder_proc to acquire + * binder_proc_unlock() - Release outer lock for given binder_proc + * @proc: struct binder_proc to acquire * * Release lock acquired via binder_proc_lock() */ -#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) +#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__) static void _binder_proc_unlock(struct binder_proc *proc, int line) __releases(&proc->outer_lock) @@ -378,7 +384,7 @@ _binder_node_inner_lock(struct binder_node *node, int line) } /** - * binder_node_unlock() - Release node and inner locks + * binder_node_inner_unlock() - Release node and inner locks * @node: struct binder_node to acquire * * Release lock acquired via binder_node_lock() @@ -477,6 +483,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread, { WARN_ON(!list_empty(&thread->waiting_thread_node)); binder_enqueue_work_ilocked(work, &thread->todo); + + /* (e)poll-based threads require an explicit wakeup signal when + * queuing their own work; they rely on these events to consume + * messages without I/O block. Without it, threads risk waiting + * indefinitely without handling the work. 
+ */ + if (thread->looper & BINDER_LOOPER_STATE_POLL && + thread->pid == current->pid && !thread->process_todo) + wake_up_interruptible_sync(&thread->wait); + thread->process_todo = true; } @@ -559,9 +575,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work) static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) { return !thread->transaction_stack && - binder_worklist_empty_ilocked(&thread->todo) && - (thread->looper & (BINDER_LOOPER_STATE_ENTERED | - BINDER_LOOPER_STATE_REGISTERED)); + binder_worklist_empty_ilocked(&thread->todo); } static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, @@ -837,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, } else { if (!internal) node->local_weak_refs++; - if (!node->has_weak_ref && list_empty(&node->work.entry)) { - if (target_list == NULL) { - pr_err("invalid inc weak node for %d\n", - node->debug_id); - return -EINVAL; - } - /* - * See comment above - */ + if (!node->has_weak_ref && target_list && list_empty(&node->work.entry)) binder_enqueue_work_ilocked(&node->work, target_list); - } } return 0; } @@ -1034,6 +1039,63 @@ static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc, return NULL; } +/* Find the smallest unused descriptor the "slow way" */ +static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset) +{ + struct binder_ref *ref; + struct rb_node *n; + u32 desc; + + desc = offset; + for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) { + ref = rb_entry(n, struct binder_ref, rb_node_desc); + if (ref->data.desc > desc) + break; + desc = ref->data.desc + 1; + } + + return desc; +} + +/* + * Find an available reference descriptor ID. The proc->outer_lock might + * be released in the process, in which case -EAGAIN is returned and the + * @desc should be considered invalid. + */ +static int get_ref_desc_olocked(struct binder_proc *proc, + struct binder_node *node, + u32 *desc) +{ + struct dbitmap *dmap = &proc->dmap; + unsigned int nbits, offset; + unsigned long *new, bit; + + /* 0 is reserved for the context manager */ + offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1; + + if (!dbitmap_enabled(dmap)) { + *desc = slow_desc_lookup_olocked(proc, offset); + return 0; + } + + if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) { + *desc = bit; + return 0; + } + + /* + * The dbitmap is full and needs to grow. The proc->outer_lock + * is briefly released to allocate the new bitmap safely. 
+ */ + nbits = dbitmap_grow_nbits(dmap); + binder_proc_unlock(proc); + new = bitmap_zalloc(nbits, GFP_KERNEL); + binder_proc_lock(proc); + dbitmap_grow(dmap, new, nbits); + + return -EAGAIN; +} + /** * binder_get_ref_for_node_olocked() - get the ref associated with given node * @proc: binder_proc that owns the ref @@ -1057,12 +1119,14 @@ static struct binder_ref *binder_get_ref_for_node_olocked( struct binder_node *node, struct binder_ref *new_ref) { - struct binder_context *context = proc->context; - struct rb_node **p = &proc->refs_by_node.rb_node; - struct rb_node *parent = NULL; struct binder_ref *ref; - struct rb_node *n; + struct rb_node *parent; + struct rb_node **p; + u32 desc; +retry: + p = &proc->refs_by_node.rb_node; + parent = NULL; while (*p) { parent = *p; ref = rb_entry(parent, struct binder_ref, rb_node_node); @@ -1077,6 +1141,10 @@ static struct binder_ref *binder_get_ref_for_node_olocked( if (!new_ref) return NULL; + /* might release the proc->outer_lock */ + if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN) + goto retry; + binder_stats_created(BINDER_STAT_REF); new_ref->data.debug_id = atomic_inc_return(&binder_last_id); new_ref->proc = proc; @@ -1084,14 +1152,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked( rb_link_node(&new_ref->rb_node_node, parent, p); rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); - new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1; - for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { - ref = rb_entry(n, struct binder_ref, rb_node_desc); - if (ref->data.desc > new_ref->data.desc) - break; - new_ref->data.desc = ref->data.desc + 1; - } - + new_ref->data.desc = desc; p = &proc->refs_by_desc.rb_node; while (*p) { parent = *p; @@ -1120,6 +1181,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked( static void binder_cleanup_ref_olocked(struct binder_ref *ref) { + struct dbitmap *dmap = &ref->proc->dmap; bool delete_node = false; binder_debug(BINDER_DEBUG_INTERNAL_REFS, @@ -1127,6 +1189,8 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref) ref->proc->pid, ref->data.debug_id, ref->data.desc, ref->node->debug_id); + if (dbitmap_enabled(dmap)) + dbitmap_clear_bit(dmap, ref->data.desc); rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); @@ -1157,6 +1221,12 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref) binder_dequeue_work(ref->proc, &ref->death->work); binder_stats_deleted(BINDER_STAT_DEATH); } + + if (ref->freeze) { + binder_dequeue_work(ref->proc, &ref->freeze->work); + binder_stats_deleted(BINDER_STAT_FREEZE); + } + binder_stats_deleted(BINDER_STAT_REF); } @@ -1194,13 +1264,13 @@ static int binder_inc_ref_olocked(struct binder_ref *ref, int strong, } /** - * binder_dec_ref() - dec the ref for given handle + * binder_dec_ref_olocked() - dec the ref for given handle * @ref: ref to be decremented * @strong: if true, strong decrement, else weak * * Decrement the ref. * - * Return: true if ref is cleaned up and ready to be freed + * Return: %true if ref is cleaned up and ready to be freed. 
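
The grow path above is a drop-lock/retry idiom: bitmap_zalloc(GFP_KERNEL) may sleep, so the allocation happens outside proc->outer_lock and the caller restarts via -EAGAIN (the "goto retry" in binder_get_ref_for_node_olocked()). A self-contained userspace sketch of the same idiom, with hypothetical names (id_pool, pool_get_id) standing in for the driver's dbitmap:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct id_pool {
    pthread_mutex_t lock;
    uint64_t *bits;
    unsigned int nbits;
};

/* Called with p->lock held. Returns an id >= offset, or -EAGAIN after
 * growing the bitmap, in which case the caller must retry. */
static int pool_get_id(struct id_pool *p, unsigned int offset)
{
    unsigned int i, nbits;
    uint64_t *bigger;

    for (i = offset; i < p->nbits; i++) {
        if (!(p->bits[i / 64] & (1ull << (i % 64)))) {
            p->bits[i / 64] |= 1ull << (i % 64);
            return (int)i;
        }
    }

    /* Full: briefly drop the lock for the allocation, as the driver does. */
    nbits = p->nbits * 2;
    pthread_mutex_unlock(&p->lock);
    bigger = calloc(nbits / 64, sizeof(*bigger));
    pthread_mutex_lock(&p->lock);
    if (bigger && nbits > p->nbits) {
        memcpy(bigger, p->bits, p->nbits / 8);
        free(p->bits);
        p->bits = bigger;
        p->nbits = nbits;
    } else {
        free(bigger); /* another path may have grown it while unlocked */
    }
    return -EAGAIN;
}

int main(void)
{
    struct id_pool p = { PTHREAD_MUTEX_INITIALIZER, calloc(1, 8), 64 };
    int id;

    pthread_mutex_lock(&p.lock);
    while ((id = pool_get_id(&p, 1)) == -EAGAIN)
        ; /* retry, mirroring the 'goto retry' above */
    pthread_mutex_unlock(&p.lock);
    printf("got id %d\n", id);
    return 0;
}
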
*/ static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong) { @@ -1284,9 +1354,29 @@ static void binder_free_ref(struct binder_ref *ref) if (ref->node) binder_free_node(ref->node); kfree(ref->death); + kfree(ref->freeze); kfree(ref); } +/* shrink descriptor bitmap if needed */ +static void try_shrink_dmap(struct binder_proc *proc) +{ + unsigned long *new; + int nbits; + + binder_proc_lock(proc); + nbits = dbitmap_shrink_nbits(&proc->dmap); + binder_proc_unlock(proc); + + if (!nbits) + return; + + new = bitmap_zalloc(nbits, GFP_KERNEL); + binder_proc_lock(proc); + dbitmap_shrink(&proc->dmap, new, nbits); + binder_proc_unlock(proc); +} + /** * binder_update_ref_for_handle() - inc/dec the ref for given handle * @proc: proc containing the ref @@ -1323,8 +1413,10 @@ static int binder_update_ref_for_handle(struct binder_proc *proc, *rdata = ref->data; binder_proc_unlock(proc); - if (delete_ref) + if (delete_ref) { binder_free_ref(ref); + try_shrink_dmap(proc); + } return ret; err_no_ref: @@ -1457,7 +1549,7 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread) * by threads that are being released. When done with the binder_proc, * this function is called to decrement the counter and free the * proc if appropriate (proc has been released, all threads have - * been released and not currenly in-use to process a transaction). + * been released and not currently in-use to process a transaction). */ static void binder_proc_dec_tmpref(struct binder_proc *proc) { @@ -1487,11 +1579,10 @@ static struct binder_thread *binder_get_txn_from( { struct binder_thread *from; - spin_lock(&t->lock); + guard(spinlock)(&t->lock); from = t->from; if (from) atomic_inc(&from->tmp_ref); - spin_unlock(&t->lock); return from; } @@ -1697,8 +1788,10 @@ static size_t binder_get_object(struct binder_proc *proc, size_t object_size = 0; read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); - if (offset > buffer->data_size || read_size < sizeof(*hdr)) + if (offset > buffer->data_size || read_size < sizeof(*hdr) || + !IS_ALIGNED(offset, sizeof(u32))) return 0; + if (u) { if (copy_from_user(object, u + offset, read_size)) return 0; @@ -1873,7 +1966,7 @@ static bool binder_validate_fixup(struct binder_proc *proc, * struct binder_task_work_cb - for deferred close * * @twork: callback_head for task work - * @fd: fd to close + * @file: file to close * * Structure to pass task work to be handled after * returning from binder_ioctl() via task_work_add(). 
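
The binder_get_object() change above additionally rejects object offsets that are not 4-byte aligned before anything is copied. A minimal standalone version of that validation, with stand-in types (obj_header is illustrative, not the driver's struct):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

struct obj_header { uint32_t type; };

static bool object_offset_valid(size_t buf_size, size_t offset)
{
    if (offset > buf_size || !IS_ALIGNED(offset, sizeof(uint32_t)))
        return false;
    /* reject offsets that leave less than a full header */
    return buf_size - offset >= sizeof(struct obj_header);
}

int main(void)
{
    printf("%d %d %d\n",
           object_offset_valid(64, 8),   /* ok */
           object_offset_valid(64, 6),   /* unaligned */
           object_offset_valid(64, 62)); /* truncated header */
    return 0;
}
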
@@ -1920,7 +2013,7 @@ static void binder_deferred_fd_close(int fd) if (!twcb) return; init_task_work(&twcb->twork, binder_do_fd_close); - twcb->file = close_fd_get_file(fd); + twcb->file = file_close_fd(fd); if (twcb->file) { // pin it until binder_do_fd_close(); see comments there get_file(twcb->file); @@ -1934,24 +2027,23 @@ static void binder_deferred_fd_close(int fd) static void binder_transaction_buffer_release(struct binder_proc *proc, struct binder_thread *thread, struct binder_buffer *buffer, - binder_size_t failed_at, + binder_size_t off_end_offset, bool is_failure) { int debug_id = buffer->debug_id; - binder_size_t off_start_offset, buffer_offset, off_end_offset; + binder_size_t off_start_offset, buffer_offset; binder_debug(BINDER_DEBUG_TRANSACTION, "%d buffer release %d, size %zd-%zd, failed at %llx\n", proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, - (unsigned long long)failed_at); + (unsigned long long)off_end_offset); if (buffer->target_node) binder_dec_node(buffer->target_node, 1, 0); off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); - off_end_offset = is_failure && failed_at ? failed_at : - off_start_offset + buffer->offsets_size; + for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; buffer_offset += sizeof(binder_size_t)) { struct binder_object_header *hdr; @@ -2077,9 +2169,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, * Convert the address to an offset relative to * the base of the transaction buffer. */ - fda_offset = - (parent->buffer - (uintptr_t)buffer->user_data) + - fda->parent_offset; + fda_offset = parent->buffer - buffer->user_data + + fda->parent_offset; for (fd_index = 0; fd_index < fda->num_fds; fd_index++) { u32 fd; @@ -2111,6 +2202,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, } } +/* Clean up all the objects in the buffer */ +static inline void binder_release_entire_buffer(struct binder_proc *proc, + struct binder_thread *thread, + struct binder_buffer *buffer, + bool is_failure) +{ + binder_size_t off_end_offset; + + off_end_offset = ALIGN(buffer->data_size, sizeof(void *)); + off_end_offset += buffer->offsets_size; + + binder_transaction_buffer_release(proc, thread, buffer, + off_end_offset, is_failure); +} + static int binder_translate_binder(struct flat_binder_object *fp, struct binder_transaction *t, struct binder_thread *thread) @@ -2303,10 +2409,10 @@ err_fd_not_accepted: /** * struct binder_ptr_fixup - data to be fixed-up in target buffer - * @offset offset in target buffer to fixup - * @skip_size bytes to skip in copy (fixup will be written later) - * @fixup_data data to write at fixup offset - * @node list node + * @offset: offset in target buffer to fixup + * @skip_size: bytes to skip in copy (fixup will be written later) + * @fixup_data: data to write at fixup offset + * @node: list node * * This is used for the pointer fixup list (pf) which is created and consumed * during binder_transaction() and is only accessed locally. No @@ -2323,10 +2429,10 @@ struct binder_ptr_fixup { /** * struct binder_sg_copy - scatter-gather data to be copied - * @offset offset in target buffer - * @sender_uaddr user address in source buffer - * @length bytes to copy - * @node list node + * @offset: offset in target buffer + * @sender_uaddr: user address in source buffer + * @length: bytes to copy + * @node: list node * * This is used for the sg copy list (sgc) which is created and consumed * during binder_transaction() and is only accessed locally. 
No @@ -2582,7 +2688,7 @@ static int binder_translate_fd_array(struct list_head *pf_head, * Convert the address to an offset relative to * the base of the transaction buffer. */ - fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + + fda_offset = parent->buffer - t->buffer->user_data + fda->parent_offset; sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + fda->parent_offset; @@ -2657,8 +2763,9 @@ static int binder_fixup_parent(struct list_head *pf_head, proc->pid, thread->pid); return -EINVAL; } - buffer_offset = bp->parent_offset + - (uintptr_t)parent->buffer - (uintptr_t)b->user_data; + + buffer_offset = bp->parent_offset + parent->buffer - b->user_data; + return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); } @@ -2728,7 +2835,10 @@ binder_find_outdated_transaction_ilocked(struct binder_transaction *t, * * Return: 0 if the transaction was successfully queued * BR_DEAD_REPLY if the target process or thread is dead - * BR_FROZEN_REPLY if the target process or thread is frozen + * BR_FROZEN_REPLY if the target process or thread is frozen and + * the sync transaction was rejected + * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen + * and the async transaction was successfully queued */ static int binder_proc_transaction(struct binder_transaction *t, struct binder_proc *proc, @@ -2738,6 +2848,7 @@ static int binder_proc_transaction(struct binder_transaction *t, bool oneway = !!(t->flags & TF_ONE_WAY); bool pending_async = false; struct binder_transaction *t_outdated = NULL; + bool frozen = false; BUG_ON(!node); binder_node_lock(node); @@ -2751,15 +2862,16 @@ static int binder_proc_transaction(struct binder_transaction *t, binder_inner_proc_lock(proc); if (proc->is_frozen) { + frozen = true; proc->sync_recv |= !oneway; proc->async_recv |= oneway; } - if ((proc->is_frozen && !oneway) || proc->is_dead || + if ((frozen && !oneway) || proc->is_dead || (thread && thread->is_dead)) { binder_inner_proc_unlock(proc); binder_node_unlock(node); - return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY; + return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY; } if (!thread && !pending_async) @@ -2770,7 +2882,7 @@ static int binder_proc_transaction(struct binder_transaction *t, } else if (!pending_async) { binder_enqueue_work_ilocked(&t->work, &proc->todo); } else { - if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) { + if ((t->flags & TF_UPDATE_TXN) && frozen) { t_outdated = binder_find_outdated_transaction_ilocked(t, &node->async_todo); if (t_outdated) { @@ -2801,20 +2913,23 @@ static int binder_proc_transaction(struct binder_transaction *t, t_outdated->buffer = NULL; buffer->transaction = NULL; trace_binder_transaction_update_buffer_release(buffer); - binder_transaction_buffer_release(proc, NULL, buffer, 0, 0); + binder_release_entire_buffer(proc, NULL, buffer, false); binder_alloc_free_buf(&proc->alloc, buffer); kfree(t_outdated); binder_stats_deleted(BINDER_STAT_TRANSACTION); } + if (oneway && frozen) + return BR_TRANSACTION_PENDING_FROZEN; + return 0; } /** * binder_get_node_refs_for_txn() - Get required refs on node for txn * @node: struct binder_node for which to get refs - * @proc: returns @node->proc if valid - * @error: if no @proc then returns BR_DEAD_REPLY + * @procp: returns @node->proc if valid + * @error: if no @procp then returns BR_DEAD_REPLY * * User-space normally keeps the node alive when creating a transaction * since it has a reference to the target. 
The local strong ref keeps it @@ -2828,8 +2943,8 @@ static int binder_proc_transaction(struct binder_transaction *t, * constructing the transaction, so we take that here as well. * * Return: The target_node with refs taken or NULL if no @node->proc is NULL. - * Also sets @proc if valid. If the @node->proc is NULL indicating that the - * target proc has died, @error is set to BR_DEAD_REPLY + * Also sets @procp if valid. If the @node->proc is NULL indicating that the + * target proc has died, @error is set to BR_DEAD_REPLY. */ static struct binder_node *binder_get_node_refs_for_txn( struct binder_node *node, @@ -2870,6 +2985,69 @@ static void binder_set_txn_from_error(struct binder_transaction *t, int id, binder_thread_dec_tmpref(from); } +/** + * binder_netlink_report() - report a transaction failure via netlink + * @proc: the binder proc sending the transaction + * @t: the binder transaction that failed + * @data_size: the user provided data size for the transaction + * @error: enum binder_driver_return_protocol returned to sender + */ +static void binder_netlink_report(struct binder_proc *proc, + struct binder_transaction *t, + u32 data_size, + u32 error) +{ + const char *context = proc->context->name; + struct sk_buff *skb; + void *hdr; + + if (!genl_has_listeners(&binder_nl_family, &init_net, + BINDER_NLGRP_REPORT)) + return; + + trace_binder_netlink_report(context, t, data_size, error); + + skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!skb) + return; + + hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT); + if (!hdr) + goto free_skb; + + if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) || + nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) || + nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) || + nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid)) + goto cancel_skb; + + if (t->to_proc && + nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid)) + goto cancel_skb; + + if (t->to_thread && + nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid)) + goto cancel_skb; + + if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY)) + goto cancel_skb; + + if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) || + nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) || + nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size)) + goto cancel_skb; + + genlmsg_end(skb, hdr); + genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT, + GFP_KERNEL); + return; + +cancel_skb: + genlmsg_cancel(skb, hdr); +free_skb: + nlmsg_free(skb); +} + static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply, @@ -2896,8 +3074,8 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t last_fixup_min_off = 0; struct binder_context *context = proc->context; int t_debug_id = atomic_inc_return(&binder_last_id); - char *secctx = NULL; - u32 secctx_sz = 0; + ktime_t t_start_time = ktime_get(); + struct lsm_context lsmctx = { }; struct list_head sgc_head; struct list_head pf_head; const void __user *user_buffer = (const void __user *) @@ -2919,6 +3097,32 @@ static void binder_transaction(struct binder_proc *proc, binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); binder_inner_proc_unlock(proc); + t = kzalloc(sizeof(*t), GFP_KERNEL); + if (!t) { + binder_txn_error("%d:%d cannot allocate transaction\n", + thread->pid, proc->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -ENOMEM; + return_error_line = __LINE__; + goto err_alloc_t_failed; + } + 
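
binder_netlink_report() above follows the usual generic-netlink report shape: bail out before any allocation when genl_has_listeners() says nobody subscribed, then unwind with gotos when an attribute cannot be added. A reduced plain-C model of that shape (all names here are stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int nlisteners; /* stand-in for genl_has_listeners() */

struct report { char buf[64]; size_t len; };

static bool put_str(struct report *r, const char *s)
{
    size_t n = strlen(s) + 1;

    if (r->len + n > sizeof(r->buf))
        return false;
    memcpy(r->buf + r->len, s, n);
    r->len += n;
    return true;
}

static void send_report(const char *context, const char *error)
{
    struct report *r;

    if (!nlisteners)
        return; /* cheap early exit, before any allocation */

    r = calloc(1, sizeof(*r));
    if (!r)
        return;
    if (!put_str(r, context) || !put_str(r, error))
        goto free_r;
    printf("multicast %zu bytes\n", r->len);
free_r:
    free(r);
}

int main(void)
{
    send_report("binder", "BR_FAILED_REPLY"); /* dropped: no listeners */
    nlisteners = 1;
    send_report("binder", "BR_FAILED_REPLY");
    return 0;
}
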
INIT_LIST_HEAD(&t->fd_fixups); + binder_stats_created(BINDER_STAT_TRANSACTION); + spin_lock_init(&t->lock); + t->debug_id = t_debug_id; + t->start_time = t_start_time; + t->from_pid = proc->pid; + t->from_tid = thread->pid; + t->sender_euid = task_euid(proc->tsk); + t->code = tr->code; + t->flags = tr->flags; + t->priority = task_nice(current); + t->work.type = BINDER_WORK_TRANSACTION; + t->is_async = !reply && (tr->flags & TF_ONE_WAY); + t->is_reply = reply; + if (!reply && !(tr->flags & TF_ONE_WAY)) + t->from = thread; + if (reply) { binder_inner_proc_lock(proc); in_reply_to = thread->transaction_stack; @@ -3022,10 +3226,8 @@ static void binder_transaction(struct binder_proc *proc, } if (!target_node) { binder_txn_error("%d:%d cannot find target node\n", - thread->pid, proc->pid); - /* - * return_error is set above - */ + proc->pid, thread->pid); + /* return_error is set above */ return_error_param = -EINVAL; return_error_line = __LINE__; goto err_dead_binder; @@ -3107,24 +3309,13 @@ static void binder_transaction(struct binder_proc *proc, } binder_inner_proc_unlock(proc); } + + t->to_proc = target_proc; + t->to_thread = target_thread; if (target_thread) e->to_thread = target_thread->pid; e->to_proc = target_proc->pid; - /* TODO: reuse incoming transaction for reply */ - t = kzalloc(sizeof(*t), GFP_KERNEL); - if (t == NULL) { - binder_txn_error("%d:%d cannot allocate transaction\n", - thread->pid, proc->pid); - return_error = BR_FAILED_REPLY; - return_error_param = -ENOMEM; - return_error_line = __LINE__; - goto err_alloc_t_failed; - } - INIT_LIST_HEAD(&t->fd_fixups); - binder_stats_created(BINDER_STAT_TRANSACTION); - spin_lock_init(&t->lock); - tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); if (tcomplete == NULL) { binder_txn_error("%d:%d cannot allocate work for transaction\n", @@ -3136,45 +3327,28 @@ static void binder_transaction(struct binder_proc *proc, } binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); - t->debug_id = t_debug_id; - if (reply) binder_debug(BINDER_DEBUG_TRANSACTION, - "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", + "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_thread->pid, - (u64)tr->data.ptr.buffer, - (u64)tr->data.ptr.offsets, (u64)tr->data_size, (u64)tr->offsets_size, (u64)extra_buffers_size); else binder_debug(BINDER_DEBUG_TRANSACTION, - "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", + "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_node->debug_id, - (u64)tr->data.ptr.buffer, - (u64)tr->data.ptr.offsets, (u64)tr->data_size, (u64)tr->offsets_size, (u64)extra_buffers_size); - if (!reply && !(tr->flags & TF_ONE_WAY)) - t->from = thread; - else - t->from = NULL; - t->sender_euid = task_euid(proc->tsk); - t->to_proc = target_proc; - t->to_thread = target_thread; - t->code = tr->code; - t->flags = tr->flags; - t->priority = task_nice(current); - if (target_node && target_node->txn_security_ctx) { u32 secid; size_t added_size; security_cred_getsecid(proc->cred, &secid); - ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); - if (ret) { + ret = security_secid_to_secctx(secid, &lsmctx); + if (ret < 0) { binder_txn_error("%d:%d failed to get security context\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; @@ -3182,7 +3356,7 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; 
goto err_get_secctx_failed; } - added_size = ALIGN(secctx_sz, sizeof(u64)); + added_size = ALIGN(lsmctx.len, sizeof(u64)); extra_buffers_size += added_size; if (extra_buffers_size < added_size) { binder_txn_error("%d:%d integer overflow of extra_buffers_size\n", @@ -3198,7 +3372,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr->offsets_size, extra_buffers_size, - !reply && (t->flags & TF_ONE_WAY), current->tgid); + !reply && (t->flags & TF_ONE_WAY)); if (IS_ERR(t->buffer)) { char *s; @@ -3216,23 +3390,23 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = NULL; goto err_binder_alloc_buf_failed; } - if (secctx) { + if (lsmctx.context) { int err; size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + ALIGN(tr->offsets_size, sizeof(void *)) + ALIGN(extra_buffers_size, sizeof(void *)) - - ALIGN(secctx_sz, sizeof(u64)); + ALIGN(lsmctx.len, sizeof(u64)); - t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; + t->security_ctx = t->buffer->user_data + buf_offset; err = binder_alloc_copy_to_buffer(&target_proc->alloc, t->buffer, buf_offset, - secctx, secctx_sz); + lsmctx.context, lsmctx.len); if (err) { t->security_ctx = 0; WARN_ON(1); } - security_release_secctx(secctx, secctx_sz); - secctx = NULL; + security_release_secctx(&lsmctx); + lsmctx.context = NULL; } t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; @@ -3276,7 +3450,7 @@ static void binder_transaction(struct binder_proc *proc, off_end_offset = off_start_offset + tr->offsets_size; sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); sg_buf_end_offset = sg_buf_offset + extra_buffers_size - - ALIGN(secctx_sz, sizeof(u64)); + ALIGN(lsmctx.len, sizeof(u64)); off_min = 0; for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; buffer_offset += sizeof(binder_size_t)) { @@ -3305,6 +3479,7 @@ static void binder_transaction(struct binder_proc *proc, */ copy_size = object_offset - user_offset; if (copy_size && (user_offset > object_offset || + object_offset > tr->data_size || binder_alloc_copy_user_to_buffer( &target_proc->alloc, t->buffer, user_offset, @@ -3500,8 +3675,7 @@ static void binder_transaction(struct binder_proc *proc, goto err_translate_failed; } /* Fixup buffer pointer to target proc address space */ - bp->buffer = (uintptr_t) - t->buffer->user_data + sg_buf_offset; + bp->buffer = t->buffer->user_data + sg_buf_offset; sg_buf_offset += ALIGN(bp->length, sizeof(u64)); num_valid = (buffer_offset - off_start_offset) / @@ -3560,11 +3734,13 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_copy_data_failed; } - if (t->buffer->oneway_spam_suspect) + if (t->buffer->oneway_spam_suspect) { tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; - else + binder_netlink_report(proc, t, tr->data_size, + BR_ONEWAY_SPAM_SUSPECT); + } else { tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; - t->work.type = BINDER_WORK_TRANSACTION; + } if (reply) { binder_enqueue_thread_work(thread, tcomplete); @@ -3592,7 +3768,6 @@ static void binder_transaction(struct binder_proc *proc, * the target replies (or there is an error). 
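
The lsm_context blob above is copied to the tail of the newly allocated buffer; the buf_offset arithmetic can be checked in isolation. A standalone recomputation on a 64-bit target (the sample sizes are made up for the demo):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
    size_t data_size = 100, offsets_size = 24;
    size_t secctx_len = 33;                             /* e.g. an SELinux label */
    size_t extra = ALIGN(secctx_len, sizeof(uint64_t)); /* 40 */

    size_t buf_offset = ALIGN(data_size, sizeof(void *)) +
                        ALIGN(offsets_size, sizeof(void *)) +
                        ALIGN(extra, sizeof(void *)) -
                        ALIGN(secctx_len, sizeof(uint64_t));

    /* 104 + 24 + 40 - 40 = 128: the blob lands right after the data
     * and offsets regions, at the start of the extra-buffers area. */
    printf("secctx copied at buffer offset %zu\n", buf_offset);
    return 0;
}
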
*/ binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); - t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; binder_inner_proc_unlock(proc); @@ -3607,9 +3782,20 @@ static void binder_transaction(struct binder_proc *proc, } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); - binder_enqueue_thread_work(thread, tcomplete); return_error = binder_proc_transaction(t, target_proc, NULL); - if (return_error) + /* + * Let the caller know when async transaction reaches a frozen + * process and is put in a pending queue, waiting for the target + * process to be unfrozen. + */ + if (return_error == BR_TRANSACTION_PENDING_FROZEN) { + tcomplete->type = BINDER_WORK_TRANSACTION_PENDING; + binder_netlink_report(proc, t, tr->data_size, + return_error); + } + binder_enqueue_thread_work(thread, tcomplete); + if (return_error && + return_error != BR_TRANSACTION_PENDING_FROZEN) goto err_dead_proc_or_thread; } if (target_thread) @@ -3647,17 +3833,14 @@ err_copy_data_failed: binder_alloc_free_buf(&target_proc->alloc, t->buffer); err_binder_alloc_buf_failed: err_bad_extra_size: - if (secctx) - security_release_secctx(secctx, secctx_sz); + if (lsmctx.context) + security_release_secctx(&lsmctx); err_get_secctx_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); err_alloc_tcomplete_failed: if (trace_binder_txn_latency_free_enabled()) binder_txn_latency_free(t); - kfree(t); - binder_stats_deleted(BINDER_STAT_TRANSACTION); -err_alloc_t_failed: err_bad_todo_list: err_bad_call_stack: err_empty_call_stack: @@ -3668,14 +3851,19 @@ err_invalid_target_handle: binder_dec_node_tmpref(target_node); } + binder_netlink_report(proc, t, tr->data_size, return_error); + kfree(t); + binder_stats_deleted(BINDER_STAT_TRANSACTION); +err_alloc_t_failed: + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n", + "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n", proc->pid, thread->pid, reply ? "reply" : (tr->flags & TF_ONE_WAY ? "async" : "call"), target_proc ? target_proc->pid : 0, target_thread ? 
target_thread->pid : 0, t_debug_id, return_error, return_error_param, - (u64)tr->data_size, (u64)tr->offsets_size, + tr->code, (u64)tr->data_size, (u64)tr->offsets_size, return_error_line); if (target_thread) @@ -3717,16 +3905,164 @@ err_invalid_target_handle: } } +static int +binder_request_freeze_notification(struct binder_proc *proc, + struct binder_thread *thread, + struct binder_handle_cookie *handle_cookie) +{ + struct binder_ref_freeze *freeze; + struct binder_ref *ref; + + freeze = kzalloc(sizeof(*freeze), GFP_KERNEL); + if (!freeze) + return -ENOMEM; + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, handle_cookie->handle, false); + if (!ref) { + binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n", + proc->pid, thread->pid, handle_cookie->handle); + binder_proc_unlock(proc); + kfree(freeze); + return -EINVAL; + } + + binder_node_lock(ref->node); + if (ref->freeze) { + binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n", + proc->pid, thread->pid); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + kfree(freeze); + return -EINVAL; + } + + binder_stats_created(BINDER_STAT_FREEZE); + INIT_LIST_HEAD(&freeze->work.entry); + freeze->cookie = handle_cookie->cookie; + freeze->work.type = BINDER_WORK_FROZEN_BINDER; + ref->freeze = freeze; + + if (ref->node->proc) { + binder_inner_proc_lock(ref->node->proc); + freeze->is_frozen = ref->node->proc->is_frozen; + binder_inner_proc_unlock(ref->node->proc); + + binder_inner_proc_lock(proc); + binder_enqueue_work_ilocked(&freeze->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); + binder_inner_proc_unlock(proc); + } + + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + return 0; +} + +static int +binder_clear_freeze_notification(struct binder_proc *proc, + struct binder_thread *thread, + struct binder_handle_cookie *handle_cookie) +{ + struct binder_ref_freeze *freeze; + struct binder_ref *ref; + + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, handle_cookie->handle, false); + if (!ref) { + binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n", + proc->pid, thread->pid, handle_cookie->handle); + binder_proc_unlock(proc); + return -EINVAL; + } + + binder_node_lock(ref->node); + + if (!ref->freeze) { + binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n", + proc->pid, thread->pid); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + return -EINVAL; + } + freeze = ref->freeze; + binder_inner_proc_lock(proc); + if (freeze->cookie != handle_cookie->cookie) { + binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n", + proc->pid, thread->pid, (u64)freeze->cookie, + (u64)handle_cookie->cookie); + binder_inner_proc_unlock(proc); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + return -EINVAL; + } + ref->freeze = NULL; + /* + * Take the existing freeze object and overwrite its work type. There are three cases here: + * 1. No pending notification. In this case just add the work to the queue. + * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we + * should resend with the new work type. + * 3. A notification is pending to be sent. Since the work is already in the queue, nothing + * needs to be done here. 
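
The three cases in the comment above reduce to a small state machine over the queued/sent/resend flags. A standalone model, with field names mirroring binder_ref_freeze but stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct freeze_work {
    bool queued; /* stand-in for !list_empty(&work.entry) */
    bool sent;   /* delivered to userspace, awaiting the _DONE ack */
    bool resend;
};

static const char *clear_notification(struct freeze_work *w)
{
    if (!w->queued) {
        w->queued = true;            /* case 1: enqueue the work now */
        return "enqueued";
    }
    if (w->sent) {
        w->resend = true;            /* case 2: resend once the ack arrives */
        return "resend after ack";
    }
    return "already queued, nothing to do"; /* case 3 */
}

int main(void)
{
    struct freeze_work a = {0};
    struct freeze_work b = { .queued = true, .sent = true };
    struct freeze_work c = { .queued = true };

    printf("%s\n%s\n%s\n", clear_notification(&a),
           clear_notification(&b), clear_notification(&c));
    return 0;
}
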
+ */ + freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION; + if (list_empty(&freeze->work.entry)) { + binder_enqueue_work_ilocked(&freeze->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); + } else if (freeze->sent) { + freeze->resend = true; + } + binder_inner_proc_unlock(proc); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + return 0; +} + +static int +binder_freeze_notification_done(struct binder_proc *proc, + struct binder_thread *thread, + binder_uintptr_t cookie) +{ + struct binder_ref_freeze *freeze = NULL; + struct binder_work *w; + + binder_inner_proc_lock(proc); + list_for_each_entry(w, &proc->delivered_freeze, entry) { + struct binder_ref_freeze *tmp_freeze = + container_of(w, struct binder_ref_freeze, work); + + if (tmp_freeze->cookie == cookie) { + freeze = tmp_freeze; + break; + } + } + if (!freeze) { + binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n", + proc->pid, thread->pid, (u64)cookie); + binder_inner_proc_unlock(proc); + return -EINVAL; + } + binder_dequeue_work_ilocked(&freeze->work); + freeze->sent = false; + if (freeze->resend) { + freeze->resend = false; + binder_enqueue_work_ilocked(&freeze->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); + } + binder_inner_proc_unlock(proc); + return 0; +} + /** * binder_free_buf() - free the specified buffer - * @proc: binder proc that owns buffer - * @buffer: buffer to be freed - * @is_failure: failed to send transaction + * @proc: binder proc that owns buffer + * @thread: binder thread performing the buffer release + * @buffer: buffer to be freed + * @is_failure: failed to send transaction * - * If buffer for an async transaction, enqueue the next async + * If the buffer is for an async transaction, enqueue the next async * transaction from the node. * - * Cleanup buffer and free it. + * Cleanup the buffer and free it. */ static void binder_free_buf(struct binder_proc *proc, @@ -3759,7 +4095,7 @@ binder_free_buf(struct binder_proc *proc, binder_node_inner_unlock(buf_node); } trace_binder_transaction_buffer_release(buffer); - binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure); + binder_release_entire_buffer(proc, thread, buffer, is_failure); binder_alloc_free_buf(&proc->alloc, buffer); } @@ -3945,20 +4281,21 @@ static int binder_thread_write(struct binder_proc *proc, if (IS_ERR_OR_NULL(buffer)) { if (PTR_ERR(buffer) == -EPERM) { binder_user_error( - "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", + "%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n", proc->pid, thread->pid, - (u64)data_ptr); + (unsigned long)data_ptr - proc->alloc.vm_start); } else { binder_user_error( - "%d:%d BC_FREE_BUFFER u%016llx no match\n", + "%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n", proc->pid, thread->pid, - (u64)data_ptr); + (unsigned long)data_ptr - proc->alloc.vm_start); } break; } binder_debug(BINDER_DEBUG_FREE_BUFFER, - "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", - proc->pid, thread->pid, (u64)data_ptr, + "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n", + proc->pid, thread->pid, + (unsigned long)data_ptr - proc->alloc.vm_start, buffer->debug_id, buffer->transaction ? 
"active" : "finished"); binder_free_buf(proc, thread, buffer, false); @@ -4200,6 +4537,44 @@ static int binder_thread_write(struct binder_proc *proc, binder_inner_proc_unlock(proc); } break; + case BC_REQUEST_FREEZE_NOTIFICATION: { + struct binder_handle_cookie handle_cookie; + int error; + + if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie))) + return -EFAULT; + ptr += sizeof(handle_cookie); + error = binder_request_freeze_notification(proc, thread, + &handle_cookie); + if (error) + return error; + } break; + + case BC_CLEAR_FREEZE_NOTIFICATION: { + struct binder_handle_cookie handle_cookie; + int error; + + if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie))) + return -EFAULT; + ptr += sizeof(handle_cookie); + error = binder_clear_freeze_notification(proc, thread, &handle_cookie); + if (error) + return error; + } break; + + case BC_FREEZE_NOTIFICATION_DONE: { + binder_uintptr_t cookie; + int error; + + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + + ptr += sizeof(cookie); + error = binder_freeze_notification_done(proc, thread, cookie); + if (error) + return error; + } break; + default: pr_err("%d:%d unknown command %u\n", proc->pid, thread->pid, cmd); @@ -4294,6 +4669,8 @@ static int binder_wait_for_work(struct binder_thread *thread, * * If we fail to allocate an fd, skip the install and release * any fds that have already been allocated. + * + * Return: 0 on success, a negative errno code on failure. */ static int binder_apply_fd_fixups(struct binder_proc *proc, struct binder_transaction *t) @@ -4440,10 +4817,13 @@ retry: binder_stat_br(proc, thread, cmd); } break; case BINDER_WORK_TRANSACTION_COMPLETE: + case BINDER_WORK_TRANSACTION_PENDING: case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: { if (proc->oneway_spam_detection_enabled && w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT) cmd = BR_ONEWAY_SPAM_SUSPECT; + else if (w->type == BINDER_WORK_TRANSACTION_PENDING) + cmd = BR_TRANSACTION_PENDING_FROZEN; else cmd = BR_TRANSACTION_COMPLETE; binder_inner_proc_unlock(proc); @@ -4586,6 +4966,46 @@ retry: if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; + + case BINDER_WORK_FROZEN_BINDER: { + struct binder_ref_freeze *freeze; + struct binder_frozen_state_info info; + + memset(&info, 0, sizeof(info)); + freeze = container_of(w, struct binder_ref_freeze, work); + info.is_frozen = freeze->is_frozen; + info.cookie = freeze->cookie; + freeze->sent = true; + binder_enqueue_work_ilocked(w, &proc->delivered_freeze); + binder_inner_proc_unlock(proc); + + if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (copy_to_user(ptr, &info, sizeof(info))) + return -EFAULT; + ptr += sizeof(info); + binder_stat_br(proc, thread, BR_FROZEN_BINDER); + goto done; /* BR_FROZEN_BINDER notifications can cause transactions */ + } break; + + case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: { + struct binder_ref_freeze *freeze = + container_of(w, struct binder_ref_freeze, work); + binder_uintptr_t cookie = freeze->cookie; + + binder_inner_proc_unlock(proc); + kfree(freeze); + binder_stats_deleted(BINDER_STAT_FREEZE); + if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (put_user(cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE); + } break; + default: binder_inner_proc_unlock(proc); 
pr_err("%d:%d: bad work type %d\n", @@ -4660,7 +5080,7 @@ retry: } trd->data_size = t->buffer->data_size; trd->offsets_size = t->buffer->offsets_size; - trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; + trd->data.ptr.buffer = t->buffer->user_data; trd->data.ptr.offsets = trd->data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); @@ -4694,16 +5114,14 @@ retry: trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION, - "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n", + "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : (cmd == BR_TRANSACTION_SEC_CTX) ? "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", t->debug_id, t_from ? t_from->proc->pid : 0, t_from ? t_from->pid : 0, cmd, - t->buffer->data_size, t->buffer->offsets_size, - (u64)trd->data.ptr.buffer, - (u64)trd->data.ptr.offsets); + t->buffer->data_size, t->buffer->offsets_size); if (t_from) binder_thread_dec_tmpref(t_from); @@ -4774,6 +5192,8 @@ static void binder_release_work(struct binder_proc *proc, "undelivered TRANSACTION_ERROR: %u\n", e->cmd); } break; + case BINDER_WORK_TRANSACTION_PENDING: + case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: case BINDER_WORK_TRANSACTION_COMPLETE: { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered TRANSACTION_COMPLETE\n"); @@ -4793,6 +5213,16 @@ static void binder_release_work(struct binder_proc *proc, } break; case BINDER_WORK_NODE: break; + case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: { + struct binder_ref_freeze *freeze; + + freeze = container_of(w, struct binder_ref_freeze, work); + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "undelivered freeze notification, %016llx\n", + (u64)freeze->cookie); + kfree(freeze); + binder_stats_deleted(BINDER_STAT_FREEZE); + } break; default: pr_err("unexpected work type, %d, not freed\n", wtype); @@ -4873,6 +5303,7 @@ static void binder_free_proc(struct binder_proc *proc) __func__, proc->outstanding_txns); device = container_of(proc->context, struct binder_device, context); if (refcount_dec_and_test(&device->ref)) { + binder_remove_device(device); kfree(proc->context->name); kfree(device); } @@ -4880,6 +5311,7 @@ static void binder_free_proc(struct binder_proc *proc) put_task_struct(proc->tsk); put_cred(proc->cred); binder_stats_deleted(BINDER_STAT_PROC); + dbitmap_free(&proc->dmap); kfree(proc); } @@ -4990,7 +5422,7 @@ static __poll_t binder_poll(struct file *filp, thread = binder_get_thread(proc); if (!thread) - return POLLERR; + return EPOLLERR; binder_inner_proc_lock(thread->proc); thread->looper |= BINDER_LOOPER_STATE_POLL; @@ -5006,24 +5438,17 @@ static __poll_t binder_poll(struct file *filp, return 0; } -static int binder_ioctl_write_read(struct file *filp, - unsigned int cmd, unsigned long arg, +static int binder_ioctl_write_read(struct file *filp, unsigned long arg, struct binder_thread *thread) { int ret = 0; struct binder_proc *proc = filp->private_data; - unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; struct binder_write_read bwr; - if (size != sizeof(struct binder_write_read)) { - ret = -EINVAL; - goto out; - } - if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { - ret = -EFAULT; - goto out; - } + if (copy_from_user(&bwr, ubuf, sizeof(bwr))) + return -EFAULT; + binder_debug(BINDER_DEBUG_READ_WRITE, "%d:%d write %lld at %016llx, read %lld at %016llx\n", proc->pid, thread->pid, @@ -5038,8 +5463,6 @@ static int binder_ioctl_write_read(struct file *filp, trace_binder_write_done(ret); 
if (ret < 0) { bwr.read_consumed = 0; - if (copy_to_user(ubuf, &bwr, sizeof(bwr))) - ret = -EFAULT; goto out; } } @@ -5053,22 +5476,17 @@ static int binder_ioctl_write_read(struct file *filp, if (!binder_worklist_empty_ilocked(&proc->todo)) binder_wakeup_proc_ilocked(proc); binder_inner_proc_unlock(proc); - if (ret < 0) { - if (copy_to_user(ubuf, &bwr, sizeof(bwr))) - ret = -EFAULT; + if (ret < 0) goto out; - } } binder_debug(BINDER_DEBUG_READ_WRITE, "%d:%d wrote %lld of %lld, read return %lld of %lld\n", proc->pid, thread->pid, (u64)bwr.write_consumed, (u64)bwr.write_size, (u64)bwr.read_consumed, (u64)bwr.read_size); - if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { - ret = -EFAULT; - goto out; - } out: + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) + ret = -EFAULT; return ret; } @@ -5081,32 +5499,28 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp, struct binder_node *new_node; kuid_t curr_euid = current_euid(); - mutex_lock(&context->context_mgr_node_lock); + guard(mutex)(&context->context_mgr_node_lock); if (context->binder_context_mgr_node) { pr_err("BINDER_SET_CONTEXT_MGR already set\n"); - ret = -EBUSY; - goto out; + return -EBUSY; } ret = security_binder_set_context_mgr(proc->cred); if (ret < 0) - goto out; + return ret; if (uid_valid(context->binder_context_mgr_uid)) { if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", from_kuid(&init_user_ns, curr_euid), from_kuid(&init_user_ns, context->binder_context_mgr_uid)); - ret = -EPERM; - goto out; + return -EPERM; } } else { context->binder_context_mgr_uid = curr_euid; } new_node = binder_new_node(proc, fbo); - if (!new_node) { - ret = -ENOMEM; - goto out; - } + if (!new_node) + return -ENOMEM; binder_node_lock(new_node); new_node->local_weak_refs++; new_node->local_strong_refs++; @@ -5115,8 +5529,6 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp, context->binder_context_mgr_node = new_node; binder_node_unlock(new_node); binder_put_node(new_node); -out: - mutex_unlock(&context->context_mgr_node_lock); return ret; } @@ -5197,6 +5609,57 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc) return false; } +static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen) +{ + struct binder_node *prev = NULL; + struct rb_node *n; + struct binder_ref *ref; + + binder_inner_proc_lock(proc); + for (n = rb_first(&proc->nodes); n; n = rb_next(n)) { + struct binder_node *node; + + node = rb_entry(n, struct binder_node, rb_node); + binder_inc_node_tmpref_ilocked(node); + binder_inner_proc_unlock(proc); + if (prev) + binder_put_node(prev); + binder_node_lock(node); + hlist_for_each_entry(ref, &node->refs, node_entry) { + /* + * Need the node lock to synchronize + * with new notification requests and the + * inner lock to synchronize with queued + * freeze notifications. 
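
Several sites in this patch (binder_get_txn_from(), binder_ioctl_set_ctx_mgr(), binder_defer_work(), proc_show()) switch to guard(mutex)/guard(spinlock) from <linux/cleanup.h>, which unlocks automatically on every return path and lets the 'out:' labels go away. The same scope-based idea in userspace C via the compiler's cleanup attribute (GCC/Clang; the guarded() macro is illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlocker(pthread_mutex_t **m) { pthread_mutex_unlock(*m); }

#define guarded(m) \
    pthread_mutex_t *_guard __attribute__((cleanup(unlocker), unused)) = \
        (pthread_mutex_lock(m), (m))

static int counter;

static int bump(void)
{
    guarded(&lock);
    /* every return path below unlocks automatically, which is what
     * lets binder_ioctl_set_ctx_mgr() above drop its 'out:' label */
    if (counter > 10)
        return -1;
    return ++counter;
}

int main(void)
{
    printf("%d\n", bump());
    return 0;
}
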
+ */ + binder_inner_proc_lock(ref->proc); + if (!ref->freeze) { + binder_inner_proc_unlock(ref->proc); + continue; + } + ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER; + if (list_empty(&ref->freeze->work.entry)) { + ref->freeze->is_frozen = is_frozen; + binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo); + binder_wakeup_proc_ilocked(ref->proc); + } else { + if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen) + ref->freeze->resend = true; + ref->freeze->is_frozen = is_frozen; + } + binder_inner_proc_unlock(ref->proc); + } + prev = node; + binder_node_unlock(node); + binder_inner_proc_lock(proc); + if (proc->is_dead) + break; + } + binder_inner_proc_unlock(proc); + if (prev) + binder_put_node(prev); +} + static int binder_ioctl_freeze(struct binder_freeze_info *info, struct binder_proc *target_proc) { @@ -5208,6 +5671,7 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info, target_proc->async_recv = false; target_proc->is_frozen = false; binder_inner_proc_unlock(target_proc); + binder_add_freeze_work(target_proc, false); return 0; } @@ -5240,6 +5704,8 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info, binder_inner_proc_lock(target_proc); target_proc->is_frozen = false; binder_inner_proc_unlock(target_proc); + } else { + binder_add_freeze_work(target_proc, true); } return ret; @@ -5296,14 +5762,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) int ret; struct binder_proc *proc = filp->private_data; struct binder_thread *thread; - unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; - /*pr_info("binder_ioctl: %d:%d %x %lx\n", - proc->pid, current->pid, cmd, arg);*/ - - binder_selftest_alloc(&proc->alloc); - trace_binder_ioctl(cmd, arg); ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); @@ -5318,12 +5778,12 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case BINDER_WRITE_READ: - ret = binder_ioctl_write_read(filp, cmd, arg, thread); + ret = binder_ioctl_write_read(filp, arg, thread); if (ret) goto err; break; case BINDER_SET_MAX_THREADS: { - int max_threads; + u32 max_threads; if (copy_from_user(&max_threads, ubuf, sizeof(max_threads))) { @@ -5361,10 +5821,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case BINDER_VERSION: { struct binder_version __user *ver = ubuf; - if (size != sizeof(struct binder_version)) { - ret = -EINVAL; - goto err; - } if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) { ret = -EINVAL; @@ -5543,10 +5999,11 @@ static void binder_vma_close(struct vm_area_struct *vma) binder_alloc_vma_close(&proc->alloc); } -static vm_fault_t binder_vm_fault(struct vm_fault *vmf) +VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } +EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault); static const struct vm_operations_struct binder_vm_ops = { .open = binder_vma_open, @@ -5572,8 +6029,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM); return -EPERM; } - vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; - vma->vm_flags &= ~VM_MAYWRITE; + vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE); vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; @@ -5595,6 +6051,8 @@ static int binder_open(struct inode *nodp, struct file *filp) proc = kzalloc(sizeof(*proc), GFP_KERNEL); if (proc == NULL) return -ENOMEM; + + 
dbitmap_init(&proc->dmap); spin_lock_init(&proc->inner_lock); spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); @@ -5619,6 +6077,7 @@ static int binder_open(struct inode *nodp, struct file *filp) binder_stats_created(BINDER_STAT_PROC); proc->pid = current->group_leader->pid; INIT_LIST_HEAD(&proc->delivered_death); + INIT_LIST_HEAD(&proc->delivered_freeze); INIT_LIST_HEAD(&proc->waiting_threads); filp->private_data = proc; @@ -5713,7 +6172,7 @@ static int binder_release(struct inode *nodp, struct file *filp) debugfs_remove(proc->debugfs_entry); if (proc->binderfs_entry) { - binderfs_remove_file(proc->binderfs_entry); + simple_recursive_removal(proc->binderfs_entry, NULL); proc->binderfs_entry = NULL; } @@ -5865,6 +6324,7 @@ static void binder_deferred_release(struct binder_proc *proc) binder_release_work(proc, &proc->todo); binder_release_work(proc, &proc->delivered_death); + binder_release_work(proc, &proc->delivered_freeze); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", @@ -5906,14 +6366,13 @@ static DECLARE_WORK(binder_deferred_work, binder_deferred_func); static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) { - mutex_lock(&binder_deferred_lock); + guard(mutex)(&binder_deferred_lock); proc->deferred_work |= defer; if (hlist_unhashed(&proc->deferred_work_node)) { hlist_add_head(&proc->deferred_work_node, &binder_deferred_list); schedule_work(&binder_deferred_work); } - mutex_unlock(&binder_deferred_lock); } static void print_binder_transaction_ilocked(struct seq_file *m, @@ -5923,17 +6382,19 @@ static void print_binder_transaction_ilocked(struct seq_file *m, { struct binder_proc *to_proc; struct binder_buffer *buffer = t->buffer; + ktime_t current_time = ktime_get(); spin_lock(&t->lock); to_proc = t->to_proc; seq_printf(m, - "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", + "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms", prefix, t->debug_id, t, - t->from ? t->from->proc->pid : 0, - t->from ? t->from->pid : 0, + t->from_pid, + t->from_tid, to_proc ? to_proc->pid : 0, t->to_thread ? 
t->to_thread->pid : 0, - t->code, t->flags, t->priority, t->need_reply); + t->code, t->flags, t->priority, t->is_async, t->is_reply, + ktime_ms_delta(current_time, t->start_time)); spin_unlock(&t->lock); if (proc != to_proc) { @@ -5951,16 +6412,16 @@ static void print_binder_transaction_ilocked(struct seq_file *m, } if (buffer->target_node) seq_printf(m, " node %d", buffer->target_node->debug_id); - seq_printf(m, " size %zd:%zd data %pK\n", + seq_printf(m, " size %zd:%zd offset %lx\n", buffer->data_size, buffer->offsets_size, - buffer->user_data); + buffer->user_data - proc->alloc.vm_start); } static void print_binder_work_ilocked(struct seq_file *m, - struct binder_proc *proc, - const char *prefix, - const char *transaction_prefix, - struct binder_work *w) + struct binder_proc *proc, + const char *prefix, + const char *transaction_prefix, + struct binder_work *w, bool hash_ptrs) { struct binder_node *node; struct binder_transaction *t; @@ -5983,9 +6444,15 @@ static void print_binder_work_ilocked(struct seq_file *m, break; case BINDER_WORK_NODE: node = container_of(w, struct binder_node, work); - seq_printf(m, "%snode work %d: u%016llx c%016llx\n", - prefix, node->debug_id, - (u64)node->ptr, (u64)node->cookie); + if (hash_ptrs) + seq_printf(m, "%snode work %d: u%p c%p\n", + prefix, node->debug_id, + (void *)(long)node->ptr, + (void *)(long)node->cookie); + else + seq_printf(m, "%snode work %d: u%016llx c%016llx\n", + prefix, node->debug_id, + (u64)node->ptr, (u64)node->cookie); break; case BINDER_WORK_DEAD_BINDER: seq_printf(m, "%shas dead binder\n", prefix); @@ -5996,6 +6463,12 @@ static void print_binder_work_ilocked(struct seq_file *m, case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: seq_printf(m, "%shas cleared death notification\n", prefix); break; + case BINDER_WORK_FROZEN_BINDER: + seq_printf(m, "%shas frozen binder\n", prefix); + break; + case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: + seq_printf(m, "%shas cleared freeze notification\n", prefix); + break; default: seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); break; @@ -6004,7 +6477,7 @@ static void print_binder_work_ilocked(struct seq_file *m, static void print_binder_thread_ilocked(struct seq_file *m, struct binder_thread *thread, - int print_always) + bool print_always, bool hash_ptrs) { struct binder_transaction *t; struct binder_work *w; @@ -6034,25 +6507,30 @@ static void print_binder_thread_ilocked(struct seq_file *m, } list_for_each_entry(w, &thread->todo, entry) { print_binder_work_ilocked(m, thread->proc, " ", - " pending transaction", w); + " pending transaction", + w, hash_ptrs); } if (!print_always && m->count == header_pos) m->count = start_pos; } static void print_binder_node_nilocked(struct seq_file *m, - struct binder_node *node) + struct binder_node *node, + bool hash_ptrs) { struct binder_ref *ref; struct binder_work *w; int count; - count = 0; - hlist_for_each_entry(ref, &node->refs, node_entry) - count++; + count = hlist_count_nodes(&node->refs); - seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", - node->debug_id, (u64)node->ptr, (u64)node->cookie, + if (hash_ptrs) + seq_printf(m, " node %d: u%p c%p", node->debug_id, + (void *)(long)node->ptr, (void *)(long)node->cookie); + else + seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id, + (u64)node->ptr, (u64)node->cookie); + seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d", node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, node->internal_strong_refs, count, 
node->tmp_refs); @@ -6065,7 +6543,8 @@ static void print_binder_node_nilocked(struct seq_file *m, if (node->proc) { list_for_each_entry(w, &node->async_todo, entry) print_binder_work_ilocked(m, node->proc, " ", - " pending async transaction", w); + " pending async transaction", + w, hash_ptrs); } } @@ -6081,8 +6560,54 @@ static void print_binder_ref_olocked(struct seq_file *m, binder_node_unlock(ref->node); } -static void print_binder_proc(struct seq_file *m, - struct binder_proc *proc, int print_all) +/** + * print_next_binder_node_ilocked() - Print binder_node from a locked list + * @m: struct seq_file for output via seq_printf() + * @proc: struct binder_proc we hold the inner_proc_lock to (if any) + * @node: struct binder_node to print fields of + * @prev_node: struct binder_node we hold a temporary reference to (if any) + * @hash_ptrs: whether to hash @node's binder_uintptr_t fields + * + * Helper function to handle synchronization around printing a struct + * binder_node while iterating through @proc->nodes or the dead nodes list. + * Caller must hold either @proc->inner_lock (for live nodes) or + * binder_dead_nodes_lock. This lock will be released during the body of this + * function, but it will be reacquired before returning to the caller. + * + * Return: pointer to the struct binder_node we hold a tmpref on + */ +static struct binder_node * +print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc, + struct binder_node *node, + struct binder_node *prev_node, bool hash_ptrs) +{ + /* + * Take a temporary reference on the node so that isn't freed while + * we print it. + */ + binder_inc_node_tmpref_ilocked(node); + /* + * Live nodes need to drop the inner proc lock and dead nodes need to + * drop the binder_dead_nodes_lock before trying to take the node lock. + */ + if (proc) + binder_inner_proc_unlock(proc); + else + spin_unlock(&binder_dead_nodes_lock); + if (prev_node) + binder_put_node(prev_node); + binder_node_inner_lock(node); + print_binder_node_nilocked(m, node, hash_ptrs); + binder_node_inner_unlock(node); + if (proc) + binder_inner_proc_lock(proc); + else + spin_lock(&binder_dead_nodes_lock); + return node; +} + +static void print_binder_proc(struct seq_file *m, struct binder_proc *proc, + bool print_all, bool hash_ptrs) { struct binder_work *w; struct rb_node *n; @@ -6095,31 +6620,19 @@ static void print_binder_proc(struct seq_file *m, header_pos = m->count; binder_inner_proc_lock(proc); - for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) + for (n = rb_first(&proc->threads); n; n = rb_next(n)) print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, - rb_node), print_all); + rb_node), print_all, hash_ptrs); - for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { + for (n = rb_first(&proc->nodes); n; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); if (!print_all && !node->has_async_transaction) continue; - /* - * take a temporary reference on the node so it - * survives and isn't removed from the tree - * while we print it. 
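
print_next_binder_node_ilocked() above centralizes a pin/drop-lock/relock walk: the current element is kept alive with a temporary reference while the lock is released for the slow printing step, and the previous element is released only after the lock is dropped. A reduced single-threaded userspace model with a hypothetical node type (the kernel version walks an rbtree and uses proper refcounting):

#include <pthread.h>
#include <stdio.h>

struct node { int id; int tmp_refs; struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void walk_and_print(struct node *head)
{
    struct node *prev = NULL, *n;

    pthread_mutex_lock(&list_lock);
    for (n = head; n; n = n->next) {
        n->tmp_refs++;              /* pin the current node */
        pthread_mutex_unlock(&list_lock);
        if (prev)
            prev->tmp_refs--;       /* unpin the previous one, lock dropped */
        printf("node %d\n", n->id); /* "slow" work outside the lock */
        prev = n;
        pthread_mutex_lock(&list_lock);
    }
    pthread_mutex_unlock(&list_lock);
    if (prev)
        prev->tmp_refs--;
}

int main(void)
{
    struct node c = {3, 0, NULL}, b = {2, 0, &c}, a = {1, 0, &b};

    walk_and_print(&a);
    return 0;
}
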
@@ -6095,31 +6620,19 @@
	header_pos = m->count;

	binder_inner_proc_lock(proc);
-	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+	for (n = rb_first(&proc->threads); n; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
-					    rb_node), print_all);
+					    rb_node), print_all, hash_ptrs);

-	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

-		/*
-		 * take a temporary reference on the node so it
-		 * survives and isn't removed from the tree
-		 * while we print it.
-		 */
-		binder_inc_node_tmpref_ilocked(node);
-		/* Need to drop inner lock to take node lock */
-		binder_inner_proc_unlock(proc);
-		if (last_node)
-			binder_put_node(last_node);
-		binder_node_inner_lock(node);
-		print_binder_node_nilocked(m, node);
-		binder_node_inner_unlock(node);
-		last_node = node;
-		binder_inner_proc_lock(proc);
+		last_node = print_next_binder_node_ilocked(m, proc, node,
+							   last_node,
+							   hash_ptrs);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
@@ -6127,23 +6640,26 @@ static void print_binder_proc(struct seq_file *m,
	if (print_all) {
		binder_proc_lock(proc);
-		for (n = rb_first(&proc->refs_by_desc);
-		     n != NULL;
-		     n = rb_next(n))
+		for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
-							     struct binder_ref,
-							     rb_node_desc));
+						 struct binder_ref,
+						 rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
-					  "  pending transaction", w);
+					  "  pending transaction", w,
+					  hash_ptrs);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
+	list_for_each_entry(w, &proc->delivered_freeze, entry) {
+		seq_puts(m, "  has delivered freeze binder\n");
+		break;
+	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
@@ -6170,6 +6686,9 @@ static const char * const binder_return_strings[] = {
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
+	"BR_TRANSACTION_PENDING_FROZEN",
+	"BR_FROZEN_BINDER",
+	"BR_CLEAR_FREEZE_NOTIFICATION_DONE",
};

static const char * const binder_command_strings[] = {
@@ -6192,6 +6711,9 @@ static const char * const binder_command_strings[] = {
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
+	"BC_REQUEST_FREEZE_NOTIFICATION",
+	"BC_CLEAR_FREEZE_NOTIFICATION",
+	"BC_FREEZE_NOTIFICATION_DONE",
};

static const char * const binder_objstat_strings[] = {
@@ -6201,7 +6723,8 @@ static const char * const binder_objstat_strings[] = {
	"ref",
	"death",
	"transaction",
-	"transaction_complete"
+	"transaction_complete",
+	"freeze",
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
@@ -6261,7 +6784,7 @@ static void print_binder_proc_stats(struct seq_file *m,
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
-	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+	for (n = rb_first(&proc->threads); n; n = rb_next(n))
		count++;
	list_for_each_entry(thread, &proc->waiting_threads,
			    waiting_thread_node)
		ready_threads++;
		   ready_threads,
		   free_async_space);
	count = 0;
-	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+	for (n = rb_first(&proc->nodes); n; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
-	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
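Note that the freeze additions also grow the BR_/BC_ string tables above: the stats code indexes these arrays by each command's ordinal number, so every new protocol code needs a matching string appended in the same position. Roughly, assuming a hypothetical br_name() helper (the driver itself open-codes this lookup in its stats printing):

#include <linux/ioctl.h>
#include <linux/kernel.h>

static const char *br_name(u32 cmd)
{
	u32 idx = _IOC_NR(cmd);	/* BR_* codes carry sequential numbers */

	if (idx < ARRAY_SIZE(binder_return_strings))
		return binder_return_strings[idx];
	return "unknown";
}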
@@ -6310,7 +6833,7 @@
	print_binder_stats(m, "  ", &proc->stats);
}

-static int state_show(struct seq_file *m, void *unused)
+static void print_binder_state(struct seq_file *m, bool hash_ptrs)
{
	struct binder_proc *proc;
	struct binder_node *node;
@@ -6321,31 +6844,40 @@ static int state_show(struct seq_file *m, void *unused)
	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
-	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
-		/*
-		 * take a temporary reference on the node so it
-		 * survives and isn't removed from the list
-		 * while we print it.
-		 */
-		node->tmp_refs++;
-		spin_unlock(&binder_dead_nodes_lock);
-		if (last_node)
-			binder_put_node(last_node);
-		binder_node_lock(node);
-		print_binder_node_nilocked(m, node);
-		binder_node_unlock(node);
-		last_node = node;
-		spin_lock(&binder_dead_nodes_lock);
-	}
+	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
+		last_node = print_next_binder_node_ilocked(m, NULL, node,
+							   last_node,
+							   hash_ptrs);
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
-		print_binder_proc(m, proc, 1);
+		print_binder_proc(m, proc, true, hash_ptrs);
	mutex_unlock(&binder_procs_lock);
+}
+
+static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
+{
+	struct binder_proc *proc;
+
+	seq_puts(m, "binder transactions:\n");
+	mutex_lock(&binder_procs_lock);
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
+		print_binder_proc(m, proc, false, hash_ptrs);
+	mutex_unlock(&binder_procs_lock);
+}
+
+static int state_show(struct seq_file *m, void *unused)
+{
+	print_binder_state(m, false);
+	return 0;
+}
+
+static int state_hashed_show(struct seq_file *m, void *unused)
+{
+	print_binder_state(m, true);
	return 0;
}

@@ -6367,14 +6899,13 @@ static int stats_show(struct seq_file *m, void *unused)

static int transactions_show(struct seq_file *m, void *unused)
{
-	struct binder_proc *proc;
-
-	seq_puts(m, "binder transactions:\n");
-	mutex_lock(&binder_procs_lock);
-	hlist_for_each_entry(proc, &binder_procs, proc_node)
-		print_binder_proc(m, proc, 0);
-	mutex_unlock(&binder_procs_lock);
+	print_binder_transactions(m, false);
+	return 0;
+}
+
+static int transactions_hashed_show(struct seq_file *m, void *unused)
+{
+	print_binder_transactions(m, true);
	return 0;
}

@@ -6383,14 +6914,13 @@ static int proc_show(struct seq_file *m, void *unused)
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

-	mutex_lock(&binder_procs_lock);
+	guard(mutex)(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
-			print_binder_proc(m, itr, 1);
+			print_binder_proc(m, itr, true, false);
		}
	}
-	mutex_unlock(&binder_procs_lock);

	return 0;
}

@@ -6454,8 +6984,10 @@ const struct file_operations binder_fops = {
};

DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(state_hashed);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

const struct binder_debugfs_entry binder_debugfs_entries[] = {
@@ -6466,6 +6998,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
		.data = NULL,
	},
	{
+		.name = "state_hashed",
+		.mode = 0444,
+		.fops = &state_hashed_fops,
+		.data = NULL,
+	},
+	{
		.name = "stats",
		.mode = 0444,
		.fops = &stats_fops,
@@ -6478,6 +7016,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
		.data = NULL,
	},
	{
+		.name = "transactions_hashed",
+		.mode = 0444,
+		.fops = &transactions_hashed_fops,
+		.data = NULL,
+	},
+	{
		.name = "transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
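proc_show() above also switches from a manual lock/unlock pair to guard(mutex) from <linux/cleanup.h>, which releases the mutex automatically when the guard variable goes out of scope, on every return path. A minimal sketch of the pattern; example_lock and example_show() are illustrative names, not driver code:

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

static DEFINE_MUTEX(example_lock);

static int example_show(struct seq_file *m, void *unused)
{
	guard(mutex)(&example_lock);	/* unlocked at every return below */

	seq_puts(m, "ok\n");
	return 0;
}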
@@ -6492,6 +7036,18 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
	{} /* terminator */
};

+void binder_add_device(struct binder_device *device)
+{
+	guard(spinlock)(&binder_devices_lock);
+	hlist_add_head(&device->hlist, &binder_devices);
+}
+
+void binder_remove_device(struct binder_device *device)
+{
+	guard(spinlock)(&binder_devices_lock);
+	hlist_del_init(&device->hlist);
+}
+
static int __init init_binder_device(const char *name)
{
	int ret;
@@ -6516,7 +7072,7 @@ static int __init init_binder_device(const char *name)
		return ret;
	}

-	hlist_add_head(&binder_device->hlist, &binder_devices);
+	binder_add_device(binder_device);

	return ret;
}
@@ -6528,6 +7084,7 @@ static int __init binder_init(void)
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;
+	const struct binder_debugfs_entry *db_entry;

	ret = binder_alloc_shrinker_init();
	if (ret)
@@ -6537,19 +7094,16 @@
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
-	if (binder_debugfs_dir_entry_root) {
-		const struct binder_debugfs_entry *db_entry;

-		binder_for_each_debugfs_entry(db_entry)
-			debugfs_create_file(db_entry->name,
-					    db_entry->mode,
-					    binder_debugfs_dir_entry_root,
-					    db_entry->data,
-					    db_entry->fops);
+	binder_for_each_debugfs_entry(db_entry)
+		debugfs_create_file(db_entry->name,
+				    db_entry->mode,
+				    binder_debugfs_dir_entry_root,
+				    db_entry->data,
+				    db_entry->fops);

-		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
-						binder_debugfs_dir_entry_root);
-	}
+	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
+					binder_debugfs_dir_entry_root);

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
@@ -6571,16 +7125,23 @@
		}
	}

-	ret = init_binderfs();
+	ret = genl_register_family(&binder_nl_family);
	if (ret)
		goto err_init_binder_device_failed;

+	ret = init_binderfs();
+	if (ret)
+		goto err_init_binderfs_failed;
+
	return ret;

+err_init_binderfs_failed:
+	genl_unregister_family(&binder_nl_family);
+
err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
-		hlist_del(&device->hlist);
+		binder_remove_device(device);
		kfree(device);
	}

@@ -6588,6 +7149,7 @@ err_init_binder_device_failed:
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

+	binder_alloc_shrinker_exit();
	return ret;
}

@@ -6596,5 +7158,3 @@ device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"
-
-MODULE_LICENSE("GPL v2");
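binder_init() now registers the generic netlink family before mounting binderfs and unwinds in reverse order on failure, via the dedicated err_init_binderfs_failed label. The shape of that pattern, condensed into a sketch (register_a()/register_b() and their unregister counterparts are placeholders, not driver functions):

static int __init example_init(void)
{
	int ret;

	ret = register_a();		/* e.g. the netlink family */
	if (ret)
		return ret;

	ret = register_b();		/* e.g. the filesystem */
	if (ret)
		goto err_unregister_a;

	return 0;

err_unregister_a:
	unregister_a();			/* undo in reverse order */
	return ret;
}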
