Diffstat (limited to 'drivers/android/binder.c')
 drivers/android/binder.c | 985 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 753 insertions(+), 232 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b21a7b246a0d..535fc881c8da 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -68,10 +68,13 @@
#include <linux/sizes.h>
#include <linux/ktime.h>
+#include <kunit/visibility.h>
+
#include <uapi/linux/android/binder.h>
#include <linux/cacheflush.h>
+#include "binder_netlink.h"
#include "binder_internal.h"
#include "binder_trace.h"
@@ -79,6 +82,8 @@ static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
static HLIST_HEAD(binder_devices);
+static DEFINE_SPINLOCK(binder_devices_lock);
+
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);
@@ -277,7 +282,7 @@ _binder_proc_lock(struct binder_proc *proc, int line)
}
/**
- * binder_proc_unlock() - Release spinlock for given binder_proc
+ * binder_proc_unlock() - Release outer lock for given binder_proc
* @proc: struct binder_proc to acquire
*
* Release lock acquired via binder_proc_lock()
@@ -570,9 +575,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
return !thread->transaction_stack &&
- binder_worklist_empty_ilocked(&thread->todo) &&
- (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
- BINDER_LOOPER_STATE_REGISTERED));
+ binder_worklist_empty_ilocked(&thread->todo);
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
@@ -848,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else {
if (!internal)
node->local_weak_refs++;
- if (!node->has_weak_ref && list_empty(&node->work.entry)) {
- if (target_list == NULL) {
- pr_err("invalid inc weak node for %d\n",
- node->debug_id);
- return -EINVAL;
- }
- /*
- * See comment above
- */
+ if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
binder_enqueue_work_ilocked(&node->work, target_list);
- }
}
return 0;
}
@@ -1045,6 +1039,63 @@ static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
return NULL;
}
+/* Find the smallest unused descriptor the "slow way" */
+static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
+{
+ struct binder_ref *ref;
+ struct rb_node *n;
+ u32 desc;
+
+ desc = offset;
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->data.desc > desc)
+ break;
+ desc = ref->data.desc + 1;
+ }
+
+ return desc;
+}
+
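A quick worked example of the scan above (editorial note, not part of the patch):

    /*
     * Suppose refs_by_desc currently holds descriptors {0, 1, 3} and
     * offset == 1 (a regular node, since 0 is reserved for the context
     * manager):
     *
     *   desc = 1
     *   ref 0: 0 > 1 is false -> desc = 0 + 1 = 1
     *   ref 1: 1 > 1 is false -> desc = 1 + 1 = 2
     *   ref 3: 3 > 2 is true  -> break
     *
     * slow_desc_lookup_olocked() returns 2, the smallest unused
     * descriptor >= offset.
     */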
+/*
+ * Find an available reference descriptor ID. The proc->outer_lock might
+ * be released in the process, in which case -EAGAIN is returned and the
+ * @desc should be considered invalid.
+ */
+static int get_ref_desc_olocked(struct binder_proc *proc,
+ struct binder_node *node,
+ u32 *desc)
+{
+ struct dbitmap *dmap = &proc->dmap;
+ unsigned int nbits, offset;
+ unsigned long *new, bit;
+
+ /* 0 is reserved for the context manager */
+ offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
+
+ if (!dbitmap_enabled(dmap)) {
+ *desc = slow_desc_lookup_olocked(proc, offset);
+ return 0;
+ }
+
+ if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
+ *desc = bit;
+ return 0;
+ }
+
+ /*
+ * The dbitmap is full and needs to grow. The proc->outer_lock
+ * is briefly released to allocate the new bitmap safely.
+ */
+ nbits = dbitmap_grow_nbits(dmap);
+ binder_proc_unlock(proc);
+ new = bitmap_zalloc(nbits, GFP_KERNEL);
+ binder_proc_lock(proc);
+ dbitmap_grow(dmap, new, nbits);
+
+ return -EAGAIN;
+}
+
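The -EAGAIN contract matters because dropping proc->outer_lock invalidates the caller's rbtree walk. A minimal sketch of the expected calling pattern (it mirrors the retry label added to binder_get_ref_for_node_olocked() below):

    binder_proc_lock(proc);
    retry:
            /* ... walk proc->refs_by_node under the outer lock ... */
            if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
                    goto retry;  /* lock was dropped; restart the walk */
            /* desc is now valid (and reserved, if the dbitmap is enabled) */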
/**
* binder_get_ref_for_node_olocked() - get the ref associated with given node
* @proc: binder_proc that owns the ref
@@ -1068,12 +1119,14 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
struct binder_node *node,
struct binder_ref *new_ref)
{
- struct binder_context *context = proc->context;
- struct rb_node **p = &proc->refs_by_node.rb_node;
- struct rb_node *parent = NULL;
struct binder_ref *ref;
- struct rb_node *n;
+ struct rb_node *parent;
+ struct rb_node **p;
+ u32 desc;
+retry:
+ p = &proc->refs_by_node.rb_node;
+ parent = NULL;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
@@ -1088,6 +1141,10 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
if (!new_ref)
return NULL;
+ /* might release the proc->outer_lock */
+ if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
+ goto retry;
+
binder_stats_created(BINDER_STAT_REF);
new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
@@ -1095,14 +1152,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
- ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->data.desc > new_ref->data.desc)
- break;
- new_ref->data.desc = ref->data.desc + 1;
- }
-
+ new_ref->data.desc = desc;
p = &proc->refs_by_desc.rb_node;
while (*p) {
parent = *p;
@@ -1131,6 +1181,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ struct dbitmap *dmap = &ref->proc->dmap;
bool delete_node = false;
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
@@ -1138,6 +1189,8 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
+ if (dbitmap_enabled(dmap))
+ dbitmap_clear_bit(dmap, ref->data.desc);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
@@ -1168,6 +1221,12 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
+
+ if (ref->freeze) {
+ binder_dequeue_work(ref->proc, &ref->freeze->work);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ }
+
binder_stats_deleted(BINDER_STAT_REF);
}
@@ -1295,9 +1354,29 @@ static void binder_free_ref(struct binder_ref *ref)
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death);
+ kfree(ref->freeze);
kfree(ref);
}
+/* shrink descriptor bitmap if needed */
+static void try_shrink_dmap(struct binder_proc *proc)
+{
+ unsigned long *new;
+ int nbits;
+
+ binder_proc_lock(proc);
+ nbits = dbitmap_shrink_nbits(&proc->dmap);
+ binder_proc_unlock(proc);
+
+ if (!nbits)
+ return;
+
+ new = bitmap_zalloc(nbits, GFP_KERNEL);
+ binder_proc_lock(proc);
+ dbitmap_shrink(&proc->dmap, new, nbits);
+ binder_proc_unlock(proc);
+}
+
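Both the grow path above and this shrink path size the bitmap under proc->outer_lock but perform the GFP_KERNEL allocation with the lock dropped, since a spinlock holder must not sleep. The dbitmap_grow()/dbitmap_shrink() install steps are presumably written to re-validate under the lock and discard @new if the situation changed in the meantime; their definitions live in dbitmap.h, outside this diff. The idiom, reduced to a sketch:

    spin_lock(&lock);
    n = compute_new_size();               /* decide under the lock      */
    spin_unlock(&lock);
    mem = bitmap_zalloc(n, GFP_KERNEL);   /* may sleep; lock is dropped */
    spin_lock(&lock);
    install_or_discard(mem, n);           /* hypothetical re-check step */
    spin_unlock(&lock);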
/**
* binder_update_ref_for_handle() - inc/dec the ref for given handle
* @proc: proc containing the ref
@@ -1334,8 +1413,10 @@ static int binder_update_ref_for_handle(struct binder_proc *proc,
*rdata = ref->data;
binder_proc_unlock(proc);
- if (delete_ref)
+ if (delete_ref) {
binder_free_ref(ref);
+ try_shrink_dmap(proc);
+ }
return ret;
err_no_ref:
@@ -1468,7 +1549,7 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
* by threads that are being released. When done with the binder_proc,
* this function is called to decrement the counter and free the
* proc if appropriate (proc has been released, all threads have
- * been released and not currenly in-use to process a transaction).
+ * been released and not currently in-use to process a transaction).
*/
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
@@ -1498,11 +1579,10 @@ static struct binder_thread *binder_get_txn_from(
{
struct binder_thread *from;
- spin_lock(&t->lock);
+ guard(spinlock)(&t->lock);
from = t->from;
if (from)
atomic_inc(&from->tmp_ref);
- spin_unlock(&t->lock);
return from;
}
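Several conversions in this patch swap manual lock/unlock pairs for the scope-based guards from <linux/cleanup.h>. A guard releases the lock automatically when the enclosing scope exits, which is what lets later hunks (binder_ioctl_set_ctx_mgr(), binder_defer_work(), proc_show()) delete their unlock-and-goto-out boilerplate. Roughly:

    {
            guard(spinlock)(&t->lock);   /* spin_lock(&t->lock)              */
            from = t->from;
            if (from)
                    atomic_inc(&from->tmp_ref);
            return from;                 /* spin_unlock() runs at scope exit */
    }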
@@ -1886,7 +1966,7 @@ static bool binder_validate_fixup(struct binder_proc *proc,
* struct binder_task_work_cb - for deferred close
*
* @twork: callback_head for task work
- * @fd: fd to close
+ * @file: file to close
*
* Structure to pass task work to be handled after
* returning from binder_ioctl() via task_work_add().
@@ -2329,10 +2409,10 @@ err_fd_not_accepted:
/**
* struct binder_ptr_fixup - data to be fixed-up in target buffer
- * @offset offset in target buffer to fixup
- * @skip_size bytes to skip in copy (fixup will be written later)
- * @fixup_data data to write at fixup offset
- * @node list node
+ * @offset: offset in target buffer to fixup
+ * @skip_size: bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node: list node
*
* This is used for the pointer fixup list (pf) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2349,10 +2429,10 @@ struct binder_ptr_fixup {
/**
* struct binder_sg_copy - scatter-gather data to be copied
- * @offset offset in target buffer
- * @sender_uaddr user address in source buffer
- * @length bytes to copy
- * @node list node
+ * @offset: offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length: bytes to copy
+ * @node: list node
*
* This is used for the sg copy list (sgc) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2905,6 +2985,69 @@ static void binder_set_txn_from_error(struct binder_transaction *t, int id,
binder_thread_dec_tmpref(from);
}
+/**
+ * binder_netlink_report() - report a transaction failure via netlink
+ * @proc: the binder proc sending the transaction
+ * @t: the binder transaction that failed
+ * @data_size: the user provided data size for the transaction
+ * @error: enum binder_driver_return_protocol returned to sender
+ */
+static void binder_netlink_report(struct binder_proc *proc,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error)
+{
+ const char *context = proc->context->name;
+ struct sk_buff *skb;
+ void *hdr;
+
+ if (!genl_has_listeners(&binder_nl_family, &init_net,
+ BINDER_NLGRP_REPORT))
+ return;
+
+ trace_binder_netlink_report(context, t, data_size, error);
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
+ if (!hdr)
+ goto free_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
+ nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
+ goto cancel_skb;
+
+ if (t->to_proc &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
+ goto cancel_skb;
+
+ if (t->to_thread &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
+ goto cancel_skb;
+
+ if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
+ goto cancel_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
+ nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
+ nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
+ goto cancel_skb;
+
+ genlmsg_end(skb, hdr);
+ genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
+ GFP_KERNEL);
+ return;
+
+cancel_skb:
+ genlmsg_cancel(skb, hdr);
+free_skb:
+ nlmsg_free(skb);
+}
+
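On the receiving end, a monitoring daemon can subscribe to these reports over generic netlink. Below is a hedged user-space sketch using libnl-genl-3; the family name "binder" and group name "report" are assumptions inferred from binder_nl_family and BINDER_NLGRP_REPORT, since the generated binder_netlink.h is not part of this diff:

    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>

    static int on_report(struct nl_msg *msg, void *arg)
    {
            /* parse the BINDER_A_REPORT_* attributes here */
            return NL_OK;
    }

    int listen_for_binder_reports(void)
    {
            struct nl_sock *sk = nl_socket_alloc();
            int grp;

            if (!sk)
                    return -1;
            if (genl_connect(sk) < 0)
                    return -1;
            /* assumed names; check the binder netlink spec for the real ones */
            grp = genl_ctrl_resolve_grp(sk, "binder", "report");
            if (grp < 0)
                    return grp;
            nl_socket_disable_seq_check(sk);
            nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_report, NULL);
            nl_socket_add_membership(sk, grp);
            for (;;)
                    nl_recvmsgs_default(sk);  /* delivers via on_report() */
    }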
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -2932,8 +3075,7 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
ktime_t t_start_time = ktime_get();
- char *secctx = NULL;
- u32 secctx_sz = 0;
+ struct lsm_context lsmctx = { };
struct list_head sgc_head;
struct list_head pf_head;
const void __user *user_buffer = (const void __user *)
@@ -2955,6 +3097,32 @@ static void binder_transaction(struct binder_proc *proc,
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
binder_inner_proc_unlock(proc);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t) {
+ binder_txn_error("%d:%d cannot allocate transaction\n",
+ thread->pid, proc->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
+ goto err_alloc_t_failed;
+ }
+ INIT_LIST_HEAD(&t->fd_fixups);
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
+ t->debug_id = t_debug_id;
+ t->start_time = t_start_time;
+ t->from_pid = proc->pid;
+ t->from_tid = thread->pid;
+ t->sender_euid = task_euid(proc->tsk);
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->work.type = BINDER_WORK_TRANSACTION;
+ t->is_async = !reply && (tr->flags & TF_ONE_WAY);
+ t->is_reply = reply;
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+
if (reply) {
binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
@@ -3058,10 +3226,8 @@ static void binder_transaction(struct binder_proc *proc,
}
if (!target_node) {
binder_txn_error("%d:%d cannot find target node\n",
- thread->pid, proc->pid);
- /*
- * return_error is set above
- */
+ proc->pid, thread->pid);
+ /* return_error is set above */
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
@@ -3143,24 +3309,13 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_inner_proc_unlock(proc);
}
+
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
if (target_thread)
e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;
- /* TODO: reuse incoming transaction for reply */
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL) {
- binder_txn_error("%d:%d cannot allocate transaction\n",
- thread->pid, proc->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -ENOMEM;
- return_error_line = __LINE__;
- goto err_alloc_t_failed;
- }
- INIT_LIST_HEAD(&t->fd_fixups);
- binder_stats_created(BINDER_STAT_TRANSACTION);
- spin_lock_init(&t->lock);
-
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
binder_txn_error("%d:%d cannot allocate work for transaction\n",
@@ -3172,48 +3327,28 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = t_debug_id;
- t->start_time = t_start_time;
-
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
- (u64)tr->data.ptr.buffer,
- (u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
- (u64)tr->data.ptr.buffer,
- (u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
- if (!reply && !(tr->flags & TF_ONE_WAY))
- t->from = thread;
- else
- t->from = NULL;
- t->from_pid = proc->pid;
- t->from_tid = thread->pid;
- t->sender_euid = task_euid(proc->tsk);
- t->to_proc = target_proc;
- t->to_thread = target_thread;
- t->code = tr->code;
- t->flags = tr->flags;
- t->priority = task_nice(current);
-
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
security_cred_getsecid(proc->cred, &secid);
- ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
- if (ret) {
+ ret = security_secid_to_secctx(secid, &lsmctx);
+ if (ret < 0) {
binder_txn_error("%d:%d failed to get security context\n",
thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
@@ -3221,7 +3356,7 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
- added_size = ALIGN(secctx_sz, sizeof(u64));
+ added_size = ALIGN(lsmctx.len, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
@@ -3255,23 +3390,23 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
- if (secctx) {
+ if (lsmctx.context) {
int err;
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
t->security_ctx = t->buffer->user_data + buf_offset;
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, buf_offset,
- secctx, secctx_sz);
+ lsmctx.context, lsmctx.len);
if (err) {
t->security_ctx = 0;
WARN_ON(1);
}
- security_release_secctx(secctx, secctx_sz);
- secctx = NULL;
+ security_release_secctx(&lsmctx);
+ lsmctx.context = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
@@ -3315,7 +3450,7 @@ static void binder_transaction(struct binder_proc *proc,
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
@@ -3344,6 +3479,7 @@ static void binder_transaction(struct binder_proc *proc,
*/
copy_size = object_offset - user_offset;
if (copy_size && (user_offset > object_offset ||
+ object_offset > tr->data_size ||
binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
t->buffer, user_offset,
@@ -3598,11 +3734,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (t->buffer->oneway_spam_suspect)
+ if (t->buffer->oneway_spam_suspect) {
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
- else
+ binder_netlink_report(proc, t, tr->data_size,
+ BR_ONEWAY_SPAM_SUSPECT);
+ } else {
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- t->work.type = BINDER_WORK_TRANSACTION;
+ }
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
@@ -3630,7 +3768,6 @@ static void binder_transaction(struct binder_proc *proc,
* the target replies (or there is an error).
*/
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
- t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
@@ -3651,8 +3788,11 @@ static void binder_transaction(struct binder_proc *proc,
* process and is put in a pending queue, waiting for the target
* process to be unfrozen.
*/
- if (return_error == BR_TRANSACTION_PENDING_FROZEN)
+ if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
+ binder_netlink_report(proc, t, tr->data_size,
+ return_error);
+ }
binder_enqueue_thread_work(thread, tcomplete);
if (return_error &&
return_error != BR_TRANSACTION_PENDING_FROZEN)
@@ -3693,17 +3833,14 @@ err_copy_data_failed:
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
- if (secctx)
- security_release_secctx(secctx, secctx_sz);
+ if (lsmctx.context)
+ security_release_secctx(&lsmctx);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
if (trace_binder_txn_latency_free_enabled())
binder_txn_latency_free(t);
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
-err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
@@ -3714,14 +3851,19 @@ err_invalid_target_handle:
binder_dec_node_tmpref(target_node);
}
+ binder_netlink_report(proc, t, tr->data_size, return_error);
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+ "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
(tr->flags & TF_ONE_WAY ? "async" : "call"),
target_proc ? target_proc->pid : 0,
target_thread ? target_thread->pid : 0,
t_debug_id, return_error, return_error_param,
- (u64)tr->data_size, (u64)tr->offsets_size,
+ tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
if (target_thread)
@@ -3763,16 +3905,164 @@ err_invalid_target_handle:
}
}
+static int
+binder_request_freeze_notification(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_handle_cookie *handle_cookie)
+{
+ struct binder_ref_freeze *freeze;
+ struct binder_ref *ref;
+
+ freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
+ if (!freeze)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
+ if (!ref) {
+ binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
+ proc->pid, thread->pid, handle_cookie->handle);
+ binder_proc_unlock(proc);
+ kfree(freeze);
+ return -EINVAL;
+ }
+
+ binder_node_lock(ref->node);
+ if (ref->freeze) {
+ binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
+ proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(freeze);
+ return -EINVAL;
+ }
+
+ binder_stats_created(BINDER_STAT_FREEZE);
+ INIT_LIST_HEAD(&freeze->work.entry);
+ freeze->cookie = handle_cookie->cookie;
+ freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+ ref->freeze = freeze;
+
+ if (ref->node->proc) {
+ binder_inner_proc_lock(ref->node->proc);
+ freeze->is_frozen = ref->node->proc->is_frozen;
+ binder_inner_proc_unlock(ref->node->proc);
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
+ }
+
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return 0;
+}
+
+static int
+binder_clear_freeze_notification(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_handle_cookie *handle_cookie)
+{
+ struct binder_ref_freeze *freeze;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
+ if (!ref) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
+ proc->pid, thread->pid, handle_cookie->handle);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+
+ binder_node_lock(ref->node);
+
+ if (!ref->freeze) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
+ proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+ freeze = ref->freeze;
+ binder_inner_proc_lock(proc);
+ if (freeze->cookie != handle_cookie->cookie) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
+ proc->pid, thread->pid, (u64)freeze->cookie,
+ (u64)handle_cookie->cookie);
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+ ref->freeze = NULL;
+ /*
+ * Take the existing freeze object and overwrite its work type. There are three cases here:
+ * 1. No pending notification. In this case just add the work to the queue.
+ * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
+ * should resend with the new work type.
+ * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
+ * needs to be done here.
+ */
+ freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
+ if (list_empty(&freeze->work.entry)) {
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ } else if (freeze->sent) {
+ freeze->resend = true;
+ }
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return 0;
+}
+
+static int
+binder_freeze_notification_done(struct binder_proc *proc,
+ struct binder_thread *thread,
+ binder_uintptr_t cookie)
+{
+ struct binder_ref_freeze *freeze = NULL;
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_freeze, entry) {
+ struct binder_ref_freeze *tmp_freeze =
+ container_of(w, struct binder_ref_freeze, work);
+
+ if (tmp_freeze->cookie == cookie) {
+ freeze = tmp_freeze;
+ break;
+ }
+ }
+ if (!freeze) {
+ binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
+ proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
+ return -EINVAL;
+ }
+ binder_dequeue_work_ilocked(&freeze->work);
+ freeze->sent = false;
+ if (freeze->resend) {
+ freeze->resend = false;
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_inner_proc_unlock(proc);
+ return 0;
+}
+
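Taken together, the three handlers above implement a request/notify/ack protocol between user space and the driver. A hedged sketch of one round trip; write_cmd()/read_return() are hypothetical helpers wrapping the BINDER_WRITE_READ ioctl, and the exact binder_frozen_state_info layout is an assumption to be checked against the uapi header:

    int fd;                        /* open binder fd (caller-provided)  */
    __u32 target_handle;           /* ref to the remote node            */
    binder_uintptr_t my_cookie;    /* opaque token echoed back in BR_*  */

    struct binder_handle_cookie hc = {
            .handle = target_handle,
            .cookie = my_cookie,
    };

    /* 1. arm the notification */
    write_cmd(fd, BC_REQUEST_FREEZE_NOTIFICATION, &hc, sizeof(hc));

    /* 2. the driver later delivers BR_FROZEN_BINDER with the frozen
     *    state info (cookie + is_frozen) when the remote proc freezes */
    struct binder_frozen_state_info info;
    read_return(fd, BR_FROZEN_BINDER, &info, sizeof(info));

    /* 3. ack it, so a queued state change can be resent
     *    (see freeze->resend in binder_freeze_notification_done()) */
    write_cmd(fd, BC_FREEZE_NOTIFICATION_DONE, &info.cookie,
              sizeof(info.cookie));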
/**
* binder_free_buf() - free the specified buffer
- * @proc: binder proc that owns buffer
- * @buffer: buffer to be freed
- * @is_failure: failed to send transaction
+ * @proc: binder proc that owns buffer
+ * @thread: binder thread performing the buffer release
+ * @buffer: buffer to be freed
+ * @is_failure: failed to send transaction
*
- * If buffer for an async transaction, enqueue the next async
+ * If the buffer is for an async transaction, enqueue the next async
* transaction from the node.
*
- * Cleanup buffer and free it.
+ * Clean up the buffer and free it.
*/
static void
binder_free_buf(struct binder_proc *proc,
@@ -3991,20 +4281,21 @@ static int binder_thread_write(struct binder_proc *proc,
if (IS_ERR_OR_NULL(buffer)) {
if (PTR_ERR(buffer) == -EPERM) {
binder_user_error(
- "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+ "%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
proc->pid, thread->pid,
- (u64)data_ptr);
+ (unsigned long)data_ptr - proc->alloc.vm_start);
} else {
binder_user_error(
- "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ "%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
proc->pid, thread->pid,
- (u64)data_ptr);
+ (unsigned long)data_ptr - proc->alloc.vm_start);
}
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
- "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
- proc->pid, thread->pid, (u64)data_ptr,
+ "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
+ proc->pid, thread->pid,
+ (unsigned long)data_ptr - proc->alloc.vm_start,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
binder_free_buf(proc, thread, buffer, false);
@@ -4246,6 +4537,44 @@ static int binder_thread_write(struct binder_proc *proc,
binder_inner_proc_unlock(proc);
} break;
+ case BC_REQUEST_FREEZE_NOTIFICATION: {
+ struct binder_handle_cookie handle_cookie;
+ int error;
+
+ if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
+ return -EFAULT;
+ ptr += sizeof(handle_cookie);
+ error = binder_request_freeze_notification(proc, thread,
+ &handle_cookie);
+ if (error)
+ return error;
+ } break;
+
+ case BC_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_handle_cookie handle_cookie;
+ int error;
+
+ if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
+ return -EFAULT;
+ ptr += sizeof(handle_cookie);
+ error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
+ if (error)
+ return error;
+ } break;
+
+ case BC_FREEZE_NOTIFICATION_DONE: {
+ binder_uintptr_t cookie;
+ int error;
+
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+
+ ptr += sizeof(cookie);
+ error = binder_freeze_notification_done(proc, thread, cookie);
+ if (error)
+ return error;
+ } break;
+
default:
pr_err("%d:%d unknown command %u\n",
proc->pid, thread->pid, cmd);
@@ -4340,6 +4669,8 @@ static int binder_wait_for_work(struct binder_thread *thread,
*
* If we fail to allocate an fd, skip the install and release
* any fds that have already been allocated.
+ *
+ * Return: 0 on success, a negative errno code on failure.
*/
static int binder_apply_fd_fixups(struct binder_proc *proc,
struct binder_transaction *t)
@@ -4635,6 +4966,46 @@ retry:
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
+
+ case BINDER_WORK_FROZEN_BINDER: {
+ struct binder_ref_freeze *freeze;
+ struct binder_frozen_state_info info;
+
+ memset(&info, 0, sizeof(info));
+ freeze = container_of(w, struct binder_ref_freeze, work);
+ info.is_frozen = freeze->is_frozen;
+ info.cookie = freeze->cookie;
+ freeze->sent = true;
+ binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
+ binder_inner_proc_unlock(proc);
+
+ if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (copy_to_user(ptr, &info, sizeof(info)))
+ return -EFAULT;
+ ptr += sizeof(info);
+ binder_stat_br(proc, thread, BR_FROZEN_BINDER);
+ goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
+ } break;
+
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_ref_freeze *freeze =
+ container_of(w, struct binder_ref_freeze, work);
+ binder_uintptr_t cookie = freeze->cookie;
+
+ binder_inner_proc_unlock(proc);
+ kfree(freeze);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
+ } break;
+
default:
binder_inner_proc_unlock(proc);
pr_err("%d:%d: bad work type %d\n",
@@ -4743,16 +5114,14 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
+ "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
(cmd == BR_TRANSACTION_SEC_CTX) ?
"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
- t->buffer->data_size, t->buffer->offsets_size,
- (u64)trd->data.ptr.buffer,
- (u64)trd->data.ptr.offsets);
+ t->buffer->data_size, t->buffer->offsets_size);
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4844,6 +5213,16 @@ static void binder_release_work(struct binder_proc *proc,
} break;
case BINDER_WORK_NODE:
break;
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_ref_freeze *freeze;
+
+ freeze = container_of(w, struct binder_ref_freeze, work);
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered freeze notification, %016llx\n",
+ (u64)freeze->cookie);
+ kfree(freeze);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ } break;
default:
pr_err("unexpected work type, %d, not freed\n",
wtype);
@@ -4924,6 +5303,7 @@ static void binder_free_proc(struct binder_proc *proc)
__func__, proc->outstanding_txns);
device = container_of(proc->context, struct binder_device, context);
if (refcount_dec_and_test(&device->ref)) {
+ binder_remove_device(device);
kfree(proc->context->name);
kfree(device);
}
@@ -4931,6 +5311,7 @@ static void binder_free_proc(struct binder_proc *proc)
put_task_struct(proc->tsk);
put_cred(proc->cred);
binder_stats_deleted(BINDER_STAT_PROC);
+ dbitmap_free(&proc->dmap);
kfree(proc);
}
@@ -5065,10 +5446,9 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
- if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
- ret = -EFAULT;
- goto out;
- }
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
+ return -EFAULT;
+
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
proc->pid, thread->pid,
@@ -5083,8 +5463,6 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
goto out;
}
}
@@ -5098,22 +5476,17 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
if (!binder_worklist_empty_ilocked(&proc->todo))
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
- if (ret < 0) {
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
+ if (ret < 0)
goto out;
- }
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d wrote %lld of %lld, read return %lld of %lld\n",
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
- ret = -EFAULT;
- goto out;
- }
out:
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
return ret;
}
@@ -5126,32 +5499,28 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
struct binder_node *new_node;
kuid_t curr_euid = current_euid();
- mutex_lock(&context->context_mgr_node_lock);
+ guard(mutex)(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = security_binder_set_context_mgr(proc->cred);
if (ret < 0)
- goto out;
+ return ret;
if (uid_valid(context->binder_context_mgr_uid)) {
if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
context->binder_context_mgr_uid));
- ret = -EPERM;
- goto out;
+ return -EPERM;
}
} else {
context->binder_context_mgr_uid = curr_euid;
}
new_node = binder_new_node(proc, fbo);
- if (!new_node) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!new_node)
+ return -ENOMEM;
binder_node_lock(new_node);
new_node->local_weak_refs++;
new_node->local_strong_refs++;
@@ -5160,8 +5529,6 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
context->binder_context_mgr_node = new_node;
binder_node_unlock(new_node);
binder_put_node(new_node);
-out:
- mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
@@ -5242,6 +5609,57 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc)
return false;
}
+static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
+{
+ struct binder_node *prev = NULL;
+ struct rb_node *n;
+ struct binder_ref *ref;
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
+ struct binder_node *node;
+
+ node = rb_entry(n, struct binder_node, rb_node);
+ binder_inc_node_tmpref_ilocked(node);
+ binder_inner_proc_unlock(proc);
+ if (prev)
+ binder_put_node(prev);
+ binder_node_lock(node);
+ hlist_for_each_entry(ref, &node->refs, node_entry) {
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * freeze notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->freeze) {
+ binder_inner_proc_unlock(ref->proc);
+ continue;
+ }
+ ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+ if (list_empty(&ref->freeze->work.entry)) {
+ ref->freeze->is_frozen = is_frozen;
+ binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ } else {
+ if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
+ ref->freeze->resend = true;
+ ref->freeze->is_frozen = is_frozen;
+ }
+ binder_inner_proc_unlock(ref->proc);
+ }
+ prev = node;
+ binder_node_unlock(node);
+ binder_inner_proc_lock(proc);
+ if (proc->is_dead)
+ break;
+ }
+ binder_inner_proc_unlock(proc);
+ if (prev)
+ binder_put_node(prev);
+}
+
static int binder_ioctl_freeze(struct binder_freeze_info *info,
struct binder_proc *target_proc)
{
@@ -5253,6 +5671,7 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
target_proc->async_recv = false;
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
+ binder_add_freeze_work(target_proc, false);
return 0;
}
@@ -5285,6 +5704,8 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
binder_inner_proc_lock(target_proc);
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
+ } else {
+ binder_add_freeze_work(target_proc, true);
}
return ret;
@@ -5343,11 +5764,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct binder_thread *thread;
void __user *ubuf = (void __user *)arg;
- /*pr_info("binder_ioctl: %d:%d %x %lx\n",
- proc->pid, current->pid, cmd, arg);*/
-
- binder_selftest_alloc(&proc->alloc);
-
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -5583,10 +5999,11 @@ static void binder_vma_close(struct vm_area_struct *vma)
binder_alloc_vma_close(&proc->alloc);
}
-static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
+VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
static const struct vm_operations_struct binder_vm_ops = {
.open = binder_vma_open,
@@ -5634,6 +6051,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+
+ dbitmap_init(&proc->dmap);
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
@@ -5658,6 +6077,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_stats_created(BINDER_STAT_PROC);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->delivered_freeze);
INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
@@ -5752,7 +6172,7 @@ static int binder_release(struct inode *nodp, struct file *filp)
debugfs_remove(proc->debugfs_entry);
if (proc->binderfs_entry) {
- binderfs_remove_file(proc->binderfs_entry);
+ simple_recursive_removal(proc->binderfs_entry, NULL);
proc->binderfs_entry = NULL;
}
@@ -5904,6 +6324,7 @@ static void binder_deferred_release(struct binder_proc *proc)
binder_release_work(proc, &proc->todo);
binder_release_work(proc, &proc->delivered_death);
+ binder_release_work(proc, &proc->delivered_freeze);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
@@ -5945,14 +6366,13 @@ static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
- mutex_lock(&binder_deferred_lock);
+ guard(mutex)(&binder_deferred_lock);
proc->deferred_work |= defer;
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
&binder_deferred_list);
schedule_work(&binder_deferred_work);
}
- mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
@@ -5967,13 +6387,13 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
prefix, t->debug_id, t,
t->from_pid,
t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply,
+ t->code, t->flags, t->priority, t->is_async, t->is_reply,
ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);
@@ -5994,14 +6414,14 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.buffer - buffer->user_data);
+ buffer->user_data - proc->alloc.vm_start);
}
static void print_binder_work_ilocked(struct seq_file *m,
- struct binder_proc *proc,
- const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w, bool hash_ptrs)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -6024,9 +6444,15 @@ static void print_binder_work_ilocked(struct seq_file *m,
break;
case BINDER_WORK_NODE:
node = container_of(w, struct binder_node, work);
- seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
- prefix, node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
+ if (hash_ptrs)
+ seq_printf(m, "%snode work %d: u%p c%p\n",
+ prefix, node->debug_id,
+ (void *)(long)node->ptr,
+ (void *)(long)node->cookie);
+ else
+ seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+ prefix, node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
break;
case BINDER_WORK_DEAD_BINDER:
seq_printf(m, "%shas dead binder\n", prefix);
@@ -6037,6 +6463,12 @@ static void print_binder_work_ilocked(struct seq_file *m,
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
seq_printf(m, "%shas cleared death notification\n", prefix);
break;
+ case BINDER_WORK_FROZEN_BINDER:
+ seq_printf(m, "%shas frozen binder\n", prefix);
+ break;
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
+ seq_printf(m, "%shas cleared freeze notification\n", prefix);
+ break;
default:
seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
break;
@@ -6045,7 +6477,7 @@ static void print_binder_work_ilocked(struct seq_file *m,
static void print_binder_thread_ilocked(struct seq_file *m,
struct binder_thread *thread,
- int print_always)
+ bool print_always, bool hash_ptrs)
{
struct binder_transaction *t;
struct binder_work *w;
@@ -6075,14 +6507,16 @@ static void print_binder_thread_ilocked(struct seq_file *m,
}
list_for_each_entry(w, &thread->todo, entry) {
print_binder_work_ilocked(m, thread->proc, " ",
- " pending transaction", w);
+ " pending transaction",
+ w, hash_ptrs);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
- struct binder_node *node)
+ struct binder_node *node,
+ bool hash_ptrs)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -6090,8 +6524,13 @@ static void print_binder_node_nilocked(struct seq_file *m,
count = hlist_count_nodes(&node->refs);
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
- node->debug_id, (u64)node->ptr, (u64)node->cookie,
+ if (hash_ptrs)
+ seq_printf(m, " node %d: u%p c%p", node->debug_id,
+ (void *)(long)node->ptr, (void *)(long)node->cookie);
+ else
+ seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
+ seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count, node->tmp_refs);
@@ -6104,7 +6543,8 @@ static void print_binder_node_nilocked(struct seq_file *m,
if (node->proc) {
list_for_each_entry(w, &node->async_todo, entry)
print_binder_work_ilocked(m, node->proc, " ",
- " pending async transaction", w);
+ " pending async transaction",
+ w, hash_ptrs);
}
}
@@ -6120,8 +6560,54 @@ static void print_binder_ref_olocked(struct seq_file *m,
binder_node_unlock(ref->node);
}
-static void print_binder_proc(struct seq_file *m,
- struct binder_proc *proc, int print_all)
+/**
+ * print_next_binder_node_ilocked() - Print binder_node from a locked list
+ * @m: struct seq_file for output via seq_printf()
+ * @proc: struct binder_proc we hold the inner_proc_lock to (if any)
+ * @node: struct binder_node to print fields of
+ * @prev_node: struct binder_node we hold a temporary reference to (if any)
+ * @hash_ptrs: whether to hash @node's binder_uintptr_t fields
+ *
+ * Helper function to handle synchronization around printing a struct
+ * binder_node while iterating through @proc->nodes or the dead nodes list.
+ * Caller must hold either @proc->inner_lock (for live nodes) or
+ * binder_dead_nodes_lock. This lock will be released during the body of this
+ * function, but it will be reacquired before returning to the caller.
+ *
+ * Return: pointer to the struct binder_node we hold a tmpref on
+ */
+static struct binder_node *
+print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_node *prev_node, bool hash_ptrs)
+{
+ /*
+ * Take a temporary reference on the node so that isn't freed while
+ * we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /*
+ * Live nodes need to drop the inner proc lock and dead nodes need to
+ * drop the binder_dead_nodes_lock before trying to take the node lock.
+ */
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ if (prev_node)
+ binder_put_node(prev_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node, hash_ptrs);
+ binder_node_inner_unlock(node);
+ if (proc)
+ binder_inner_proc_lock(proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ return node;
+}
+
+static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
+ bool print_all, bool hash_ptrs)
{
struct binder_work *w;
struct rb_node *n;
@@ -6134,31 +6620,19 @@ static void print_binder_proc(struct seq_file *m,
header_pos = m->count;
binder_inner_proc_lock(proc);
- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->threads); n; n = rb_next(n))
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
- rb_node), print_all);
+ rb_node), print_all, hash_ptrs);
- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
if (!print_all && !node->has_async_transaction)
continue;
- /*
- * take a temporary reference on the node so it
- * survives and isn't removed from the tree
- * while we print it.
- */
- binder_inc_node_tmpref_ilocked(node);
- /* Need to drop inner lock to take node lock */
- binder_inner_proc_unlock(proc);
- if (last_node)
- binder_put_node(last_node);
- binder_node_inner_lock(node);
- print_binder_node_nilocked(m, node);
- binder_node_inner_unlock(node);
- last_node = node;
- binder_inner_proc_lock(proc);
+ last_node = print_next_binder_node_ilocked(m, proc, node,
+ last_node,
+ hash_ptrs);
}
binder_inner_proc_unlock(proc);
if (last_node)
@@ -6166,23 +6640,26 @@ static void print_binder_proc(struct seq_file *m,
if (print_all) {
binder_proc_lock(proc);
- for (n = rb_first(&proc->refs_by_desc);
- n != NULL;
- n = rb_next(n))
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
print_binder_ref_olocked(m, rb_entry(n,
- struct binder_ref,
- rb_node_desc));
+ struct binder_ref,
+ rb_node_desc));
binder_proc_unlock(proc);
}
binder_alloc_print_allocated(m, &proc->alloc);
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
print_binder_work_ilocked(m, proc, " ",
- " pending transaction", w);
+ " pending transaction", w,
+ hash_ptrs);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ list_for_each_entry(w, &proc->delivered_freeze, entry) {
+ seq_puts(m, " has delivered freeze binder\n");
+ break;
+ }
binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
@@ -6209,7 +6686,9 @@ static const char * const binder_return_strings[] = {
"BR_FAILED_REPLY",
"BR_FROZEN_REPLY",
"BR_ONEWAY_SPAM_SUSPECT",
- "BR_TRANSACTION_PENDING_FROZEN"
+ "BR_TRANSACTION_PENDING_FROZEN",
+ "BR_FROZEN_BINDER",
+ "BR_CLEAR_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_command_strings[] = {
@@ -6232,6 +6711,9 @@ static const char * const binder_command_strings[] = {
"BC_DEAD_BINDER_DONE",
"BC_TRANSACTION_SG",
"BC_REPLY_SG",
+ "BC_REQUEST_FREEZE_NOTIFICATION",
+ "BC_CLEAR_FREEZE_NOTIFICATION",
+ "BC_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_objstat_strings[] = {
@@ -6241,7 +6723,8 @@ static const char * const binder_objstat_strings[] = {
"ref",
"death",
"transaction",
- "transaction_complete"
+ "transaction_complete",
+ "freeze",
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
@@ -6301,7 +6784,7 @@ static void print_binder_proc_stats(struct seq_file *m,
count = 0;
ready_threads = 0;
binder_inner_proc_lock(proc);
- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->threads); n; n = rb_next(n))
count++;
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
@@ -6315,7 +6798,7 @@ static void print_binder_proc_stats(struct seq_file *m,
ready_threads,
free_async_space);
count = 0;
- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n))
count++;
binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
@@ -6323,7 +6806,7 @@ static void print_binder_proc_stats(struct seq_file *m,
strong = 0;
weak = 0;
binder_proc_lock(proc);
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
@@ -6350,7 +6833,7 @@ static void print_binder_proc_stats(struct seq_file *m,
print_binder_stats(m, " ", &proc->stats);
}
-static int state_show(struct seq_file *m, void *unused)
+static void print_binder_state(struct seq_file *m, bool hash_ptrs)
{
struct binder_proc *proc;
struct binder_node *node;
@@ -6361,31 +6844,40 @@ static int state_show(struct seq_file *m, void *unused)
spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
- /*
- * take a temporary reference on the node so it
- * survives and isn't removed from the list
- * while we print it.
- */
- node->tmp_refs++;
- spin_unlock(&binder_dead_nodes_lock);
- if (last_node)
- binder_put_node(last_node);
- binder_node_lock(node);
- print_binder_node_nilocked(m, node);
- binder_node_unlock(node);
- last_node = node;
- spin_lock(&binder_dead_nodes_lock);
- }
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
+ last_node = print_next_binder_node_ilocked(m, NULL, node,
+ last_node,
+ hash_ptrs);
spin_unlock(&binder_dead_nodes_lock);
if (last_node)
binder_put_node(last_node);
mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
- print_binder_proc(m, proc, 1);
+ print_binder_proc(m, proc, true, hash_ptrs);
mutex_unlock(&binder_procs_lock);
+}
+
+static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
+{
+ struct binder_proc *proc;
+
+ seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
+ print_binder_proc(m, proc, false, hash_ptrs);
+ mutex_unlock(&binder_procs_lock);
+}
+
+static int state_show(struct seq_file *m, void *unused)
+{
+ print_binder_state(m, false);
+ return 0;
+}
+
+static int state_hashed_show(struct seq_file *m, void *unused)
+{
+ print_binder_state(m, true);
return 0;
}
@@ -6407,14 +6899,13 @@ static int stats_show(struct seq_file *m, void *unused)
static int transactions_show(struct seq_file *m, void *unused)
{
- struct binder_proc *proc;
-
- seq_puts(m, "binder transactions:\n");
- mutex_lock(&binder_procs_lock);
- hlist_for_each_entry(proc, &binder_procs, proc_node)
- print_binder_proc(m, proc, 0);
- mutex_unlock(&binder_procs_lock);
+ print_binder_transactions(m, false);
+ return 0;
+}
+
+static int transactions_hashed_show(struct seq_file *m, void *unused)
+{
+ print_binder_transactions(m, true);
return 0;
}
@@ -6423,14 +6914,13 @@ static int proc_show(struct seq_file *m, void *unused)
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- mutex_lock(&binder_procs_lock);
+ guard(mutex)(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, itr, 1);
+ print_binder_proc(m, itr, true, false);
}
}
- mutex_unlock(&binder_procs_lock);
return 0;
}
@@ -6494,8 +6984,10 @@ const struct file_operations binder_fops = {
};
DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(state_hashed);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
DEFINE_SHOW_ATTRIBUTE(transaction_log);
const struct binder_debugfs_entry binder_debugfs_entries[] = {
@@ -6506,6 +6998,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
.data = NULL,
},
{
+ .name = "state_hashed",
+ .mode = 0444,
+ .fops = &state_hashed_fops,
+ .data = NULL,
+ },
+ {
.name = "stats",
.mode = 0444,
.fops = &stats_fops,
@@ -6518,6 +7016,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
.data = NULL,
},
{
+ .name = "transactions_hashed",
+ .mode = 0444,
+ .fops = &transactions_hashed_fops,
+ .data = NULL,
+ },
+ {
.name = "transaction_log",
.mode = 0444,
.fops = &transaction_log_fops,
@@ -6532,6 +7036,18 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
{} /* terminator */
};
+void binder_add_device(struct binder_device *device)
+{
+ guard(spinlock)(&binder_devices_lock);
+ hlist_add_head(&device->hlist, &binder_devices);
+}
+
+void binder_remove_device(struct binder_device *device)
+{
+ guard(spinlock)(&binder_devices_lock);
+ hlist_del_init(&device->hlist);
+}
+
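These helpers centralize membership in the global binder_devices list under binder_devices_lock. Besides init_binder_device() below, dynamically created binderfs devices presumably go through the same pair; the removal side is visible in binder_free_proc() above. Sketch of the intended pairing:

    struct binder_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

    refcount_set(&dev->ref, 1);
    binder_add_device(dev);                   /* publish on the global list */
    /* ... */
    if (refcount_dec_and_test(&dev->ref)) {   /* last user gone             */
            binder_remove_device(dev);        /* unlink before freeing      */
            kfree(dev);
    }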
static int __init init_binder_device(const char *name)
{
int ret;
@@ -6556,7 +7072,7 @@ static int __init init_binder_device(const char *name)
return ret;
}
- hlist_add_head(&binder_device->hlist, &binder_devices);
+ binder_add_device(binder_device);
return ret;
}
@@ -6609,16 +7125,23 @@ static int __init binder_init(void)
}
}
- ret = init_binderfs();
+ ret = genl_register_family(&binder_nl_family);
if (ret)
goto err_init_binder_device_failed;
+ ret = init_binderfs();
+ if (ret)
+ goto err_init_binderfs_failed;
+
return ret;
+err_init_binderfs_failed:
+ genl_unregister_family(&binder_nl_family);
+
err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
- hlist_del(&device->hlist);
+ binder_remove_device(device);
kfree(device);
}
@@ -6635,5 +7158,3 @@ device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"
-
-MODULE_LICENSE("GPL v2");