Diffstat (limited to 'drivers/android')
-rw-r--r--  drivers/android/binder.c                  476
-rw-r--r--  drivers/android/binder_alloc.c            384
-rw-r--r--  drivers/android/binder_alloc.h             47
-rw-r--r--  drivers/android/binder_alloc_selftest.c    18
-rw-r--r--  drivers/android/binder_internal.h          40
-rw-r--r--  drivers/android/binder_trace.h              2
-rw-r--r--  drivers/android/binderfs.c                 15
-rw-r--r--  drivers/android/dbitmap.h                 168
8 files changed, 929 insertions, 221 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index dd6923d37931..5fc2c8ee61b1 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -277,7 +277,7 @@ _binder_proc_lock(struct binder_proc *proc, int line)
}
/**
- * binder_proc_unlock() - Release spinlock for given binder_proc
+ * binder_proc_unlock() - Release outer lock for given binder_proc
* @proc: struct binder_proc to acquire
*
* Release lock acquired via binder_proc_lock()
@@ -570,9 +570,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
return !thread->transaction_stack &&
- binder_worklist_empty_ilocked(&thread->todo) &&
- (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
- BINDER_LOOPER_STATE_REGISTERED));
+ binder_worklist_empty_ilocked(&thread->todo);
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
@@ -1045,6 +1043,63 @@ static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
return NULL;
}
+/* Find the smallest unused descriptor the "slow way" */
+static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
+{
+ struct binder_ref *ref;
+ struct rb_node *n;
+ u32 desc;
+
+ desc = offset;
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->data.desc > desc)
+ break;
+ desc = ref->data.desc + 1;
+ }
+
+ return desc;
+}
+
+/*
+ * Find an available reference descriptor ID. The proc->outer_lock might
+ * be released in the process, in which case -EAGAIN is returned and the
+ * @desc should be considered invalid.
+ */
+static int get_ref_desc_olocked(struct binder_proc *proc,
+ struct binder_node *node,
+ u32 *desc)
+{
+ struct dbitmap *dmap = &proc->dmap;
+ unsigned int nbits, offset;
+ unsigned long *new, bit;
+
+ /* 0 is reserved for the context manager */
+ offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
+
+ if (!dbitmap_enabled(dmap)) {
+ *desc = slow_desc_lookup_olocked(proc, offset);
+ return 0;
+ }
+
+ if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
+ *desc = bit;
+ return 0;
+ }
+
+ /*
+ * The dbitmap is full and needs to grow. The proc->outer_lock
+ * is briefly released to allocate the new bitmap safely.
+ */
+ nbits = dbitmap_grow_nbits(dmap);
+ binder_proc_unlock(proc);
+ new = bitmap_zalloc(nbits, GFP_KERNEL);
+ binder_proc_lock(proc);
+ dbitmap_grow(dmap, new, nbits);
+
+ return -EAGAIN;
+}
+
/**
* binder_get_ref_for_node_olocked() - get the ref associated with given node
* @proc: binder_proc that owns the ref
@@ -1068,12 +1123,14 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
struct binder_node *node,
struct binder_ref *new_ref)
{
- struct binder_context *context = proc->context;
- struct rb_node **p = &proc->refs_by_node.rb_node;
- struct rb_node *parent = NULL;
struct binder_ref *ref;
- struct rb_node *n;
+ struct rb_node *parent;
+ struct rb_node **p;
+ u32 desc;
+retry:
+ p = &proc->refs_by_node.rb_node;
+ parent = NULL;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
@@ -1088,6 +1145,10 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
if (!new_ref)
return NULL;
+ /* might release the proc->outer_lock */
+ if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
+ goto retry;
+
binder_stats_created(BINDER_STAT_REF);
new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
@@ -1095,14 +1156,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
- ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->data.desc > new_ref->data.desc)
- break;
- new_ref->data.desc = ref->data.desc + 1;
- }
-
+ new_ref->data.desc = desc;
p = &proc->refs_by_desc.rb_node;
while (*p) {
parent = *p;
@@ -1131,6 +1185,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ struct dbitmap *dmap = &ref->proc->dmap;
bool delete_node = false;
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
@@ -1138,6 +1193,8 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
+ if (dbitmap_enabled(dmap))
+ dbitmap_clear_bit(dmap, ref->data.desc);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
@@ -1168,6 +1225,12 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
+
+ if (ref->freeze) {
+ binder_dequeue_work(ref->proc, &ref->freeze->work);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ }
+
binder_stats_deleted(BINDER_STAT_REF);
}
@@ -1295,9 +1358,29 @@ static void binder_free_ref(struct binder_ref *ref)
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death);
+ kfree(ref->freeze);
kfree(ref);
}
+/* shrink descriptor bitmap if needed */
+static void try_shrink_dmap(struct binder_proc *proc)
+{
+ unsigned long *new;
+ int nbits;
+
+ binder_proc_lock(proc);
+ nbits = dbitmap_shrink_nbits(&proc->dmap);
+ binder_proc_unlock(proc);
+
+ if (!nbits)
+ return;
+
+ new = bitmap_zalloc(nbits, GFP_KERNEL);
+ binder_proc_lock(proc);
+ dbitmap_shrink(&proc->dmap, new, nbits);
+ binder_proc_unlock(proc);
+}
+
/**
* binder_update_ref_for_handle() - inc/dec the ref for given handle
* @proc: proc containing the ref
@@ -1334,8 +1417,10 @@ static int binder_update_ref_for_handle(struct binder_proc *proc,
*rdata = ref->data;
binder_proc_unlock(proc);
- if (delete_ref)
+ if (delete_ref) {
binder_free_ref(ref);
+ try_shrink_dmap(proc);
+ }
return ret;
err_no_ref:
@@ -1468,7 +1553,7 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
* by threads that are being released. When done with the binder_proc,
* this function is called to decrement the counter and free the
* proc if appropriate (proc has been released, all threads have
- * been released and not currenly in-use to process a transaction).
+ * been released and not currently in-use to process a transaction).
*/
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
@@ -1886,7 +1971,7 @@ static bool binder_validate_fixup(struct binder_proc *proc,
* struct binder_task_work_cb - for deferred close
*
* @twork: callback_head for task work
- * @fd: fd to close
+ * @file: file to close
*
* Structure to pass task work to be handled after
* returning from binder_ioctl() via task_work_add().
@@ -2932,8 +3017,7 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
ktime_t t_start_time = ktime_get();
- char *secctx = NULL;
- u32 secctx_sz = 0;
+ struct lsm_context lsmctx = { };
struct list_head sgc_head;
struct list_head pf_head;
const void __user *user_buffer = (const void __user *)
@@ -3212,8 +3296,8 @@ static void binder_transaction(struct binder_proc *proc,
size_t added_size;
security_cred_getsecid(proc->cred, &secid);
- ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
- if (ret) {
+ ret = security_secid_to_secctx(secid, &lsmctx);
+ if (ret < 0) {
binder_txn_error("%d:%d failed to get security context\n",
thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
@@ -3221,7 +3305,7 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
- added_size = ALIGN(secctx_sz, sizeof(u64));
+ added_size = ALIGN(lsmctx.len, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
@@ -3255,23 +3339,23 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
- if (secctx) {
+ if (lsmctx.context) {
int err;
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
t->security_ctx = t->buffer->user_data + buf_offset;
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, buf_offset,
- secctx, secctx_sz);
+ lsmctx.context, lsmctx.len);
if (err) {
t->security_ctx = 0;
WARN_ON(1);
}
- security_release_secctx(secctx, secctx_sz);
- secctx = NULL;
+ security_release_secctx(&lsmctx);
+ lsmctx.context = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
@@ -3315,7 +3399,7 @@ static void binder_transaction(struct binder_proc *proc,
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
@@ -3344,6 +3428,7 @@ static void binder_transaction(struct binder_proc *proc,
*/
copy_size = object_offset - user_offset;
if (copy_size && (user_offset > object_offset ||
+ object_offset > tr->data_size ||
binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
t->buffer, user_offset,
@@ -3693,8 +3778,8 @@ err_copy_data_failed:
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
- if (secctx)
- security_release_secctx(secctx, secctx_sz);
+ if (lsmctx.context)
+ security_release_secctx(&lsmctx);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -3715,13 +3800,13 @@ err_invalid_target_handle:
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+ "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
(tr->flags & TF_ONE_WAY ? "async" : "call"),
target_proc ? target_proc->pid : 0,
target_thread ? target_thread->pid : 0,
t_debug_id, return_error, return_error_param,
- (u64)tr->data_size, (u64)tr->offsets_size,
+ tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
if (target_thread)
@@ -3763,6 +3848,153 @@ err_invalid_target_handle:
}
}
+static int
+binder_request_freeze_notification(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_handle_cookie *handle_cookie)
+{
+ struct binder_ref_freeze *freeze;
+ struct binder_ref *ref;
+
+ freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
+ if (!freeze)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
+ if (!ref) {
+ binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
+ proc->pid, thread->pid, handle_cookie->handle);
+ binder_proc_unlock(proc);
+ kfree(freeze);
+ return -EINVAL;
+ }
+
+ binder_node_lock(ref->node);
+ if (ref->freeze) {
+ binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
+ proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(freeze);
+ return -EINVAL;
+ }
+
+ binder_stats_created(BINDER_STAT_FREEZE);
+ INIT_LIST_HEAD(&freeze->work.entry);
+ freeze->cookie = handle_cookie->cookie;
+ freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+ ref->freeze = freeze;
+
+ if (ref->node->proc) {
+ binder_inner_proc_lock(ref->node->proc);
+ freeze->is_frozen = ref->node->proc->is_frozen;
+ binder_inner_proc_unlock(ref->node->proc);
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
+ }
+
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return 0;
+}
+
+static int
+binder_clear_freeze_notification(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_handle_cookie *handle_cookie)
+{
+ struct binder_ref_freeze *freeze;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
+ if (!ref) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
+ proc->pid, thread->pid, handle_cookie->handle);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+
+ binder_node_lock(ref->node);
+
+ if (!ref->freeze) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
+ proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+ freeze = ref->freeze;
+ binder_inner_proc_lock(proc);
+ if (freeze->cookie != handle_cookie->cookie) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
+ proc->pid, thread->pid, (u64)freeze->cookie,
+ (u64)handle_cookie->cookie);
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+ ref->freeze = NULL;
+ /*
+ * Take the existing freeze object and overwrite its work type. There are three cases here:
+ * 1. No pending notification. In this case just add the work to the queue.
+ * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
+ * should resend with the new work type.
+ * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
+ * needs to be done here.
+ */
+ freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
+ if (list_empty(&freeze->work.entry)) {
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ } else if (freeze->sent) {
+ freeze->resend = true;
+ }
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return 0;
+}
+
+static int
+binder_freeze_notification_done(struct binder_proc *proc,
+ struct binder_thread *thread,
+ binder_uintptr_t cookie)
+{
+ struct binder_ref_freeze *freeze = NULL;
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_freeze, entry) {
+ struct binder_ref_freeze *tmp_freeze =
+ container_of(w, struct binder_ref_freeze, work);
+
+ if (tmp_freeze->cookie == cookie) {
+ freeze = tmp_freeze;
+ break;
+ }
+ }
+ if (!freeze) {
+ binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
+ proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
+ return -EINVAL;
+ }
+ binder_dequeue_work_ilocked(&freeze->work);
+ freeze->sent = false;
+ if (freeze->resend) {
+ freeze->resend = false;
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_inner_proc_unlock(proc);
+ return 0;
+}
+
/**
* binder_free_buf() - free the specified buffer
* @proc: binder proc that owns buffer
@@ -4246,6 +4478,44 @@ static int binder_thread_write(struct binder_proc *proc,
binder_inner_proc_unlock(proc);
} break;
+ case BC_REQUEST_FREEZE_NOTIFICATION: {
+ struct binder_handle_cookie handle_cookie;
+ int error;
+
+ if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
+ return -EFAULT;
+ ptr += sizeof(handle_cookie);
+ error = binder_request_freeze_notification(proc, thread,
+ &handle_cookie);
+ if (error)
+ return error;
+ } break;
+
+ case BC_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_handle_cookie handle_cookie;
+ int error;
+
+ if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
+ return -EFAULT;
+ ptr += sizeof(handle_cookie);
+ error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
+ if (error)
+ return error;
+ } break;
+
+ case BC_FREEZE_NOTIFICATION_DONE: {
+ binder_uintptr_t cookie;
+ int error;
+
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+
+ ptr += sizeof(cookie);
+ error = binder_freeze_notification_done(proc, thread, cookie);
+ if (error)
+ return error;
+ } break;
+
default:
pr_err("%d:%d unknown command %u\n",
proc->pid, thread->pid, cmd);
@@ -4635,6 +4905,46 @@ retry:
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
+
+ case BINDER_WORK_FROZEN_BINDER: {
+ struct binder_ref_freeze *freeze;
+ struct binder_frozen_state_info info;
+
+ memset(&info, 0, sizeof(info));
+ freeze = container_of(w, struct binder_ref_freeze, work);
+ info.is_frozen = freeze->is_frozen;
+ info.cookie = freeze->cookie;
+ freeze->sent = true;
+ binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
+ binder_inner_proc_unlock(proc);
+
+ if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (copy_to_user(ptr, &info, sizeof(info)))
+ return -EFAULT;
+ ptr += sizeof(info);
+ binder_stat_br(proc, thread, BR_FROZEN_BINDER);
+ goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
+ } break;
+
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_ref_freeze *freeze =
+ container_of(w, struct binder_ref_freeze, work);
+ binder_uintptr_t cookie = freeze->cookie;
+
+ binder_inner_proc_unlock(proc);
+ kfree(freeze);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
+ } break;
+
default:
binder_inner_proc_unlock(proc);
pr_err("%d:%d: bad work type %d\n",
@@ -4844,6 +5154,16 @@ static void binder_release_work(struct binder_proc *proc,
} break;
case BINDER_WORK_NODE:
break;
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_ref_freeze *freeze;
+
+ freeze = container_of(w, struct binder_ref_freeze, work);
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered freeze notification, %016llx\n",
+ (u64)freeze->cookie);
+ kfree(freeze);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ } break;
default:
pr_err("unexpected work type, %d, not freed\n",
wtype);
@@ -4931,6 +5251,7 @@ static void binder_free_proc(struct binder_proc *proc)
put_task_struct(proc->tsk);
put_cred(proc->cred);
binder_stats_deleted(BINDER_STAT_PROC);
+ dbitmap_free(&proc->dmap);
kfree(proc);
}
@@ -5242,6 +5563,57 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc)
return false;
}
+static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
+{
+ struct binder_node *prev = NULL;
+ struct rb_node *n;
+ struct binder_ref *ref;
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
+ struct binder_node *node;
+
+ node = rb_entry(n, struct binder_node, rb_node);
+ binder_inc_node_tmpref_ilocked(node);
+ binder_inner_proc_unlock(proc);
+ if (prev)
+ binder_put_node(prev);
+ binder_node_lock(node);
+ hlist_for_each_entry(ref, &node->refs, node_entry) {
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * freeze notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->freeze) {
+ binder_inner_proc_unlock(ref->proc);
+ continue;
+ }
+ ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+ if (list_empty(&ref->freeze->work.entry)) {
+ ref->freeze->is_frozen = is_frozen;
+ binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ } else {
+ if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
+ ref->freeze->resend = true;
+ ref->freeze->is_frozen = is_frozen;
+ }
+ binder_inner_proc_unlock(ref->proc);
+ }
+ prev = node;
+ binder_node_unlock(node);
+ binder_inner_proc_lock(proc);
+ if (proc->is_dead)
+ break;
+ }
+ binder_inner_proc_unlock(proc);
+ if (prev)
+ binder_put_node(prev);
+}
+
static int binder_ioctl_freeze(struct binder_freeze_info *info,
struct binder_proc *target_proc)
{
@@ -5253,6 +5625,7 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
target_proc->async_recv = false;
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
+ binder_add_freeze_work(target_proc, false);
return 0;
}
@@ -5285,6 +5658,8 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
binder_inner_proc_lock(target_proc);
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
+ } else {
+ binder_add_freeze_work(target_proc, true);
}
return ret;
@@ -5367,7 +5742,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
break;
case BINDER_SET_MAX_THREADS: {
- int max_threads;
+ u32 max_threads;
if (copy_from_user(&max_threads, ubuf,
sizeof(max_threads))) {
@@ -5634,6 +6009,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+
+ dbitmap_init(&proc->dmap);
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
@@ -5658,6 +6035,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_stats_created(BINDER_STAT_PROC);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->delivered_freeze);
INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
@@ -5904,6 +6282,7 @@ static void binder_deferred_release(struct binder_proc *proc)
binder_release_work(proc, &proc->todo);
binder_release_work(proc, &proc->delivered_death);
+ binder_release_work(proc, &proc->delivered_freeze);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
@@ -5994,7 +6373,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.buffer - buffer->user_data);
+ buffer->user_data - proc->alloc.vm_start);
}
static void print_binder_work_ilocked(struct seq_file *m,
@@ -6037,6 +6416,12 @@ static void print_binder_work_ilocked(struct seq_file *m,
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
seq_printf(m, "%shas cleared death notification\n", prefix);
break;
+ case BINDER_WORK_FROZEN_BINDER:
+ seq_printf(m, "%shas frozen binder\n", prefix);
+ break;
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
+ seq_printf(m, "%shas cleared freeze notification\n", prefix);
+ break;
default:
seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
break;
@@ -6183,6 +6568,10 @@ static void print_binder_proc(struct seq_file *m,
seq_puts(m, " has delivered dead binder\n");
break;
}
+ list_for_each_entry(w, &proc->delivered_freeze, entry) {
+ seq_puts(m, " has delivered freeze binder\n");
+ break;
+ }
binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
@@ -6209,7 +6598,9 @@ static const char * const binder_return_strings[] = {
"BR_FAILED_REPLY",
"BR_FROZEN_REPLY",
"BR_ONEWAY_SPAM_SUSPECT",
- "BR_TRANSACTION_PENDING_FROZEN"
+ "BR_TRANSACTION_PENDING_FROZEN",
+ "BR_FROZEN_BINDER",
+ "BR_CLEAR_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_command_strings[] = {
@@ -6232,6 +6623,9 @@ static const char * const binder_command_strings[] = {
"BC_DEAD_BINDER_DONE",
"BC_TRANSACTION_SG",
"BC_REPLY_SG",
+ "BC_REQUEST_FREEZE_NOTIFICATION",
+ "BC_CLEAR_FREEZE_NOTIFICATION",
+ "BC_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_objstat_strings[] = {
@@ -6241,7 +6635,8 @@ static const char * const binder_objstat_strings[] = {
"ref",
"death",
"transaction",
- "transaction_complete"
+ "transaction_complete",
+ "freeze",
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
@@ -6532,6 +6927,11 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
{} /* terminator */
};
+void binder_add_device(struct binder_device *device)
+{
+ hlist_add_head(&device->hlist, &binder_devices);
+}
+
static int __init init_binder_device(const char *name)
{
int ret;
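
The freeze-notification plumbing added above boils down to a small state machine: binder_clear_freeze_notification() documents three cases depending on whether the work item is idle, queued, or already delivered ("sent"), and binder_freeze_notification_done() requeues it when a resend was requested. A minimal userspace sketch of that state machine, in plain C; freeze_model, clear_notification() and notification_done() are invented names, and the real driver tracks the "queued" state through binder_work lists under the proc locks shown in the patch:

#include <stdbool.h>
#include <stdio.h>

enum work_type { WORK_FROZEN, WORK_CLEAR_DONE };

struct freeze_model {
    enum work_type type;
    bool queued;   /* stands in for !list_empty(&freeze->work.entry) */
    bool sent;     /* delivered, awaiting BC_FREEZE_NOTIFICATION_DONE */
    bool resend;
};

/* mirrors the three cases documented in binder_clear_freeze_notification() */
static void clear_notification(struct freeze_model *f)
{
    f->type = WORK_CLEAR_DONE;
    if (!f->queued && !f->sent) {
        f->queued = true;   /* case 1: nothing pending, queue the ack */
        printf("case 1: queued clear-done work\n");
    } else if (f->sent) {
        f->resend = true;   /* case 2: notification in flight, resend later */
        printf("case 2: resend after the ack arrives\n");
    } else {
        printf("case 3: work already queued, type change is enough\n");
    }
}

/* mirrors binder_freeze_notification_done() */
static void notification_done(struct freeze_model *f)
{
    f->sent = false;
    if (f->resend) {
        f->resend = false;
        f->queued = true;   /* requeue the (now clear-done) work */
        printf("requeued work, type=%d\n", (int)f->type);
    }
}

int main(void)
{
    struct freeze_model f = { .type = WORK_FROZEN, .sent = true };

    clear_notification(&f);   /* hits case 2: notification is in flight */
    notification_done(&f);    /* ack arrives, clear-done work is requeued */
    return 0;
}
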
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2e1f261ec5c8..fcfaf1b899c8 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -61,7 +61,7 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return alloc->vm_start + alloc->buffer_size - buffer->user_data;
return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
@@ -169,32 +169,33 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return buffer;
}
static inline void
-binder_set_installed_page(struct binder_lru_page *lru_page,
+binder_set_installed_page(struct binder_alloc *alloc,
+ unsigned long index,
struct page *page)
{
/* Pairs with acquire in binder_get_installed_page() */
- smp_store_release(&lru_page->page_ptr, page);
+ smp_store_release(&alloc->pages[index], page);
}
static inline struct page *
-binder_get_installed_page(struct binder_lru_page *lru_page)
+binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
/* Pairs with release in binder_set_installed_page() */
- return smp_load_acquire(&lru_page->page_ptr);
+ return smp_load_acquire(&alloc->pages[index]);
}
static void binder_lru_freelist_add(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, false, start, end);
@@ -202,65 +203,159 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
size_t index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (!binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
+ if (!page)
continue;
trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add_obj(&binder_freelist, &page->lru);
+ ret = list_lru_add(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
}
}
-static int binder_install_single_page(struct binder_alloc *alloc,
- struct binder_lru_page *lru_page,
- unsigned long addr)
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
- struct page *page;
- int ret = 0;
+ /* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+ smp_store_release(&alloc->mapped, state);
+}
- if (!mmget_not_zero(alloc->mm))
- return -ESRCH;
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+ /* pairs with smp_store_release in binder_alloc_set_mapped() */
+ return smp_load_acquire(&alloc->mapped);
+}
+
+static struct page *binder_page_lookup(struct binder_alloc *alloc,
+ unsigned long addr)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct page *page;
+ long npages = 0;
/*
- * Protected with mmap_sem in write mode as multiple tasks
- * might race to install the same page.
+ * Find an existing page in the remote mm. If missing,
+ * don't attempt to fault-in, just propagate an error.
*/
- mmap_write_lock(alloc->mm);
- if (binder_get_installed_page(lru_page))
- goto out;
+ mmap_read_lock(mm);
+ if (binder_alloc_is_mapped(alloc))
+ npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
+ &page, NULL);
+ mmap_read_unlock(mm);
- if (!alloc->vma) {
- pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
- ret = -ESRCH;
- goto out;
+ return npages > 0 ? page : NULL;
+}
+
+static int binder_page_insert(struct binder_alloc *alloc,
+ unsigned long addr,
+ struct page *page)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct vm_area_struct *vma;
+ int ret = -ESRCH;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, addr);
+ if (vma) {
+ if (binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ vma_end_read(vma);
+ return ret;
}
+ /* fall back to mmap_lock */
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, addr);
+ if (vma && binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ mmap_read_unlock(mm);
+
+ return ret;
+}
+
+static struct page *binder_page_alloc(struct binder_alloc *alloc,
+ unsigned long index)
+{
+ struct binder_shrinker_mdata *mdata;
+ struct page *page;
+
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ /* allocate and install shrinker metadata under page->private */
+ mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ if (!mdata) {
+ __free_page(page);
+ return NULL;
+ }
+
+ mdata->alloc = alloc;
+ mdata->page_index = index;
+ INIT_LIST_HEAD(&mdata->lru);
+ set_page_private(page, (unsigned long)mdata);
+
+ return page;
+}
+
+static void binder_free_page(struct page *page)
+{
+ kfree((struct binder_shrinker_mdata *)page_private(page));
+ __free_page(page);
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+ unsigned long index,
+ unsigned long addr)
+{
+ struct page *page;
+ int ret;
+
+ if (!mmget_not_zero(alloc->mm))
+ return -ESRCH;
+
+ page = binder_page_alloc(alloc, index);
if (!page) {
- pr_err("%d: failed to allocate page\n", alloc->pid);
ret = -ENOMEM;
goto out;
}
- ret = vm_insert_page(alloc->vma, addr, page);
- if (ret) {
+ ret = binder_page_insert(alloc, addr, page);
+ switch (ret) {
+ case -EBUSY:
+ /*
+ * EBUSY is ok. Someone installed the pte first but the
+ * alloc->pages[index] has not been updated yet. Discard
+ * our page and look up the one already installed.
+ */
+ ret = 0;
+ binder_free_page(page);
+ page = binder_page_lookup(alloc, addr);
+ if (!page) {
+ pr_err("%d: failed to find page at offset %lx\n",
+ alloc->pid, addr - alloc->vm_start);
+ ret = -ESRCH;
+ break;
+ }
+ fallthrough;
+ case 0:
+ /* Mark page installation complete and safe to use */
+ binder_set_installed_page(alloc, index, page);
+ break;
+ default:
+ binder_free_page(page);
pr_err("%d: %s failed to insert page at offset %lx with %d\n",
- alloc->pid, __func__, addr - alloc->buffer, ret);
- __free_page(page);
- ret = -ENOMEM;
- goto out;
+ alloc->pid, __func__, addr - alloc->vm_start, ret);
+ break;
}
-
- /* Mark page installation complete and safe to use */
- binder_set_installed_page(lru_page, page);
out:
- mmap_write_unlock(alloc->mm);
mmput_async(alloc->mm);
return ret;
}
@@ -269,7 +364,6 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- struct binder_lru_page *page;
unsigned long start, final;
unsigned long page_addr;
@@ -280,15 +374,13 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
unsigned long index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (binder_get_installed_page(alloc, index))
continue;
trace_binder_alloc_page_start(alloc, index);
- ret = binder_install_single_page(alloc, page, page_addr);
+ ret = binder_install_single_page(alloc, index, page_addr);
if (ret)
return ret;
@@ -302,8 +394,8 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, true, start, end);
@@ -311,13 +403,16 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long index;
bool on_lru;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
- if (page->page_ptr) {
+ if (page) {
trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!on_lru);
trace_binder_alloc_lru_end(alloc, index);
@@ -329,20 +424,6 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
}
}
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
-{
- /* pairs with smp_load_acquire in binder_alloc_get_vma() */
- smp_store_release(&alloc->vma, vma);
-}
-
-static inline struct vm_area_struct *binder_alloc_get_vma(
- struct binder_alloc *alloc)
-{
- /* pairs with smp_store_release in binder_alloc_set_vma() */
- return smp_load_acquire(&alloc->vma);
-}
-
static void debug_no_space_locked(struct binder_alloc *alloc)
{
size_t largest_alloc_size = 0;
@@ -576,7 +657,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int ret;
/* Check binder_alloc is fully initialized */
- if (!binder_alloc_get_vma(alloc)) {
+ if (!binder_alloc_is_mapped(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
@@ -597,10 +678,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
if (!next)
return ERR_PTR(-ENOMEM);
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
goto out;
}
@@ -608,7 +689,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
@@ -674,8 +755,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->user_data < alloc->buffer);
- BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->vm_start);
+ BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += buffer_size;
@@ -734,14 +815,13 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
- (buffer->user_data - alloc->buffer);
+ (buffer->user_data - alloc->vm_start);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
- struct binder_lru_page *lru_page;
- lru_page = &alloc->pages[index];
*pgoffp = pgoff;
- return lru_page->page_ptr;
+
+ return alloc->pages[index];
}
/**
@@ -785,17 +865,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_free_buf_locked(). However, that could
- * increase contention for the alloc->lock if clear_on_free
- * is used frequently for large buffers. This lock is not
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -816,7 +896,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
const char *failure_string;
- int ret, i;
+ int ret;
if (unlikely(vma->vm_mm != alloc->mm)) {
ret = -EINVAL;
@@ -834,22 +914,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer = vma->vm_start;
+ alloc->vm_start = vma->vm_start;
- alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
- sizeof(alloc->pages[0]),
- GFP_KERNEL);
- if (alloc->pages == NULL) {
+ alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
+ sizeof(alloc->pages[0]),
+ GFP_KERNEL);
+ if (!alloc->pages) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- alloc->pages[i].alloc = alloc;
- INIT_LIST_HEAD(&alloc->pages[i].lru);
- }
-
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
@@ -857,22 +932,22 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->user_data = alloc->buffer;
+ buffer->user_data = alloc->vm_start;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
/* Signal binder_alloc is fully initialized */
- binder_alloc_set_vma(alloc, vma);
+ binder_alloc_set_mapped(alloc, true);
return 0;
err_alloc_buf_struct_failed:
- kfree(alloc->pages);
+ kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- alloc->buffer = 0;
+ alloc->vm_start = 0;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:
@@ -893,8 +968,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- spin_lock(&alloc->lock);
- BUG_ON(alloc->vma);
+ mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->mapped);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -925,23 +1000,27 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ struct page *page;
bool on_lru;
- if (!alloc->pages[i].page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
continue;
- on_lru = list_lru_del_obj(&binder_freelist,
- &alloc->pages[i].lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d %s\n",
__func__, alloc->pid, i,
on_lru ? "on lru" : "active");
- __free_page(alloc->pages[i].page_ptr);
+ binder_free_page(page);
page_count++;
}
- kfree(alloc->pages);
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
+ kvfree(alloc->pages);
if (alloc->mm)
mmdrop(alloc->mm);
@@ -964,17 +1043,17 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->debug_id,
- buffer->user_data - alloc->buffer,
+ buffer->user_data - alloc->vm_start,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -985,29 +1064,29 @@ void binder_alloc_print_allocated(struct seq_file *m,
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc)
{
- struct binder_lru_page *page;
+ struct page *page;
int i;
int active = 0;
int lru = 0;
int free = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
+ if (binder_alloc_is_mapped(alloc)) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
free++;
- else if (list_empty(&page->lru))
+ else if (list_empty(page_to_lru(page)))
active++;
else
lru++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -1023,10 +1102,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return count;
}
@@ -1036,18 +1115,18 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
* @alloc: binder_alloc for this proc
*
* Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
* allocating more buffers.
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ binder_alloc_set_mapped(alloc, false);
}
/**
* binder_alloc_free_page() - shrinker callback to free pages
* @item: item to free
- * @lock: lock protecting the item
+ * @lru: list_lru instance of the item
* @cb_arg: callback argument
*
* Called from list_lru_walk() in binder_shrink_scan() to free
@@ -1055,44 +1134,54 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
*/
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lock,
void *cb_arg)
- __must_hold(lock)
+ __must_hold(&lru->lock)
{
- struct binder_lru_page *page = container_of(item, typeof(*page), lru);
- struct binder_alloc *alloc = page->alloc;
+ struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
+ struct binder_alloc *alloc = mdata->alloc;
struct mm_struct *mm = alloc->mm;
struct vm_area_struct *vma;
struct page *page_to_free;
unsigned long page_addr;
+ int mm_locked = 0;
size_t index;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!mmap_read_trylock(mm))
- goto err_mmap_read_lock_failed;
- if (!spin_trylock(&alloc->lock))
- goto err_get_alloc_lock_failed;
- if (!page->page_ptr)
- goto err_page_already_freed;
-
- index = page - alloc->pages;
- page_addr = alloc->buffer + index * PAGE_SIZE;
-
- vma = vma_lookup(mm, page_addr);
- if (vma && vma != binder_alloc_get_vma(alloc))
+
+ index = mdata->page_index;
+ page_addr = alloc->vm_start + index * PAGE_SIZE;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, page_addr);
+ if (!vma) {
+ /* fall back to mmap_lock */
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+ mm_locked = 1;
+ vma = vma_lookup(mm, page_addr);
+ }
+
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ /*
+ * Since a binder_alloc can only be mapped once, we ensure
+ * the vma corresponds to this mapping by checking whether
+ * the binder_alloc is still mapped.
+ */
+ if (vma && !binder_alloc_is_mapped(alloc))
goto err_invalid_vma;
trace_binder_unmap_kernel_start(alloc, index);
- page_to_free = page->page_ptr;
- page->page_ptr = NULL;
+ page_to_free = alloc->pages[index];
+ binder_set_installed_page(alloc, index, NULL);
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- spin_unlock(&alloc->lock);
- spin_unlock(lock);
+ spin_unlock(&lru->lock);
if (vma) {
trace_binder_unmap_user_start(alloc, index);
@@ -1102,18 +1191,23 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
mmput_async(mm);
- __free_page(page_to_free);
+ binder_free_page(page_to_free);
- spin_lock(lock);
return LRU_REMOVED_RETRY;
err_invalid_vma:
-err_page_already_freed:
- spin_unlock(&alloc->lock);
-err_get_alloc_lock_failed:
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
@@ -1147,7 +1241,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
alloc->pid = current->group_leader->pid;
alloc->mm = current->mm;
mmgrab(alloc->mm);
- spin_lock_init(&alloc->lock);
+ mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
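
The -EBUSY handling in binder_install_single_page() and the smp_store_release()/smp_load_acquire() pairing around alloc->pages[] can be modelled with C11 atomics. A minimal sketch, in which a compare-and-swap on an invented "pte" flag stands in for vm_insert_page() refusing to overwrite an existing mapping; page_slot, insert_page() and installer() are likewise invented names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int pte;                 /* 0 = empty, 1 = populated */
static _Atomic(int *) page_slot;       /* stands in for alloc->pages[index] */

static int insert_page(int *page)
{
    int expected = 0;

    (void)page;
    /* the first caller wins, everyone else gets the -EBUSY analogue */
    return atomic_compare_exchange_strong(&pte, &expected, 1) ? 0 : -1;
}

/* models binder_install_single_page() */
static void *installer(void *arg)
{
    int *mine = malloc(sizeof(*mine));

    *mine = (int)(long)arg;
    if (insert_page(mine) == 0) {
        /* release store pairs with the acquire load below, like
         * binder_set_installed_page()/binder_get_installed_page() */
        atomic_store_explicit(&page_slot, mine, memory_order_release);
    } else {
        int *winner;

        free(mine);                    /* lost the race: drop our copy */
        do {                           /* look up the page the winner installed */
            winner = atomic_load_explicit(&page_slot, memory_order_acquire);
        } while (!winner);
        printf("thread %ld reused installed page %d\n", (long)arg, *winner);
    }
    return NULL;
}

int main(void)
{
    pthread_t t[2];

    pthread_create(&t[0], NULL, installer, (void *)1L);
    pthread_create(&t[1], NULL, installer, (void *)2L);
    pthread_join(t[0], NULL);
    pthread_join(t[1], NULL);
    free(atomic_load(&page_slot));
    return 0;
}
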
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 70387234477e..feecd7414241 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,7 +9,7 @@
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
@@ -59,34 +59,43 @@ struct binder_buffer {
};
/**
- * struct binder_lru_page - page object used for binder shrinker
- * @page_ptr: pointer to physical page in mmap'd space
- * @lru: entry in binder_freelist
- * @alloc: binder_alloc for a proc
+ * struct binder_shrinker_mdata - binder metadata used to reclaim pages
+ * @lru: LRU entry in binder_freelist
+ * @alloc: binder_alloc owning the page to reclaim
+ * @page_index: index in @alloc->pages[] of the page to reclaim
*/
-struct binder_lru_page {
+struct binder_shrinker_mdata {
struct list_head lru;
- struct page *page_ptr;
struct binder_alloc *alloc;
+ unsigned long page_index;
};
+static inline struct list_head *page_to_lru(struct page *p)
+{
+ struct binder_shrinker_mdata *mdata;
+
+ mdata = (struct binder_shrinker_mdata *)page_private(p);
+
+ return &mdata->lru;
+}
+
/**
* struct binder_alloc - per-binder proc state for binder allocator
- * @lock: protects binder_alloc fields
- * @vma: vm_area_struct passed to mmap_handler
- * (invariant after mmap)
+ * @mutex: protects binder_alloc fields
* @mm: copy of task->mm (invariant after open)
- * @buffer: base of per-proc address space mapped via mmap
+ * @vm_start: base of per-proc address space mapped via mmap
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of binder_lru_page
+ * @pages: array of struct page *
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
* @pages_high: high watermark of offset in @pages
+ * @mapped: whether the vm area is mapped; each binder instance is
+ * allowed a single mapping throughout its lifetime
* @oneway_spam_detected: %true if oneway spam detection fired, clear that
* flag once the async buffer has returned to a healthy state
*
@@ -96,18 +105,18 @@ struct binder_lru_page {
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
- spinlock_t lock;
- struct vm_area_struct *vma;
+ struct mutex mutex;
struct mm_struct *mm;
- unsigned long buffer;
+ unsigned long vm_start;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct binder_lru_page *pages;
+ struct page **pages;
size_t buffer_size;
int pid;
size_t pages_high;
+ bool mapped;
bool oneway_spam_detected;
};
@@ -118,7 +127,7 @@ static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lock, void *cb_arg);
+ void *cb_arg);
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
@@ -153,9 +162,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
size_t free_async_space;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
free_async_space = alloc->free_async_space;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return free_async_space;
}
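
Most of the memory saving from replacing struct binder_lru_page with plain struct page pointers comes from the always-allocated pages[] array; the shrinker metadata now costs one small allocation per resident page only. A rough standalone comparison, assuming a 64-bit build, 4 KiB pages and the SZ_4M cap visible in binder_alloc_mmap_handler(); the struct definitions are simplified stand-ins:

#include <stdio.h>

/* opaque stand-ins; only the pointer sizes matter here */
struct page;
struct binder_alloc;
struct list_head { struct list_head *next, *prev; };

/* old scheme: one of these per page, allocated for the whole mapping */
struct binder_lru_page {
    struct list_head lru;
    struct page *page_ptr;
    struct binder_alloc *alloc;
};

/* new scheme: metadata allocated only while a page is resident and
 * stashed under page->private */
struct binder_shrinker_mdata {
    struct list_head lru;
    struct binder_alloc *alloc;
    unsigned long page_index;
};

int main(void)
{
    const size_t npages = (4UL << 20) / 4096;   /* SZ_4M mapping cap */

    printf("old pages[]: %zu bytes\n", npages * sizeof(struct binder_lru_page));
    printf("new pages[]: %zu bytes\n", npages * sizeof(struct page *));
    printf("extra per resident page: %zu bytes\n",
           sizeof(struct binder_shrinker_mdata));
    return 0;   /* typically 32 KiB vs 8 KiB, plus 32 B per resident page */
}
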
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 81442fe20a69..c88735c54848 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -104,11 +104,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
end = PAGE_ALIGN(buffer->user_data + size);
page_addr = buffer->user_data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
- page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
- if (!alloc->pages[page_index].page_ptr ||
- !list_empty(&alloc->pages[page_index].lru)) {
+ page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (!alloc->pages[page_index] ||
+ !list_empty(page_to_lru(alloc->pages[page_index]))) {
pr_err("expect alloc but is %s at page index %d\n",
- alloc->pages[page_index].page_ptr ?
+ alloc->pages[page_index] ?
"lru" : "free", page_index);
return false;
}
@@ -148,10 +148,10 @@ static void binder_selftest_free_buf(struct binder_alloc *alloc,
* if binder shrinker ran during binder_alloc_free_buf
* calls above.
*/
- if (list_empty(&alloc->pages[i].lru)) {
+ if (list_empty(page_to_lru(alloc->pages[i]))) {
pr_err_size_seq(sizes, seq);
pr_err("expect lru but is %s at page index %d\n",
- alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ alloc->pages[i] ? "alloc" : "free", i);
binder_selftest_failures++;
}
}
@@ -168,9 +168,9 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
}
for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
- if (alloc->pages[i].page_ptr) {
+ if (alloc->pages[i]) {
pr_err("expect free but is %s at page index %d\n",
- list_empty(&alloc->pages[i].lru) ?
+ list_empty(page_to_lru(alloc->pages[i])) ?
"alloc" : "lru", i);
binder_selftest_failures++;
}
@@ -291,7 +291,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
+ if (!binder_selftest_run || !alloc->mapped)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 7270d4d22207..6a66c9769c6c 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -3,7 +3,6 @@
#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H
-#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
@@ -14,6 +13,7 @@
#include <linux/uidgid.h>
#include <uapi/linux/android/binderfs.h>
#include "binder_alloc.h"
+#include "dbitmap.h"
struct binder_context {
struct binder_node *binder_context_mgr_node;
@@ -24,8 +24,7 @@ struct binder_context {
/**
* struct binder_device - information about a binder device node
- * @hlist: list of binder devices (only used for devices requested via
- * CONFIG_ANDROID_BINDER_DEVICES)
+ * @hlist: list of binder devices
* @miscdev: information about a binder character device node
* @context: binder context information
* @binderfs_inode: This is the inode of the root dentry of the super block
@@ -129,12 +128,13 @@ enum binder_stat_types {
BINDER_STAT_DEATH,
BINDER_STAT_TRANSACTION,
BINDER_STAT_TRANSACTION_COMPLETE,
+ BINDER_STAT_FREEZE,
BINDER_STAT_COUNT
};
struct binder_stats {
- atomic_t br[_IOC_NR(BR_TRANSACTION_PENDING_FROZEN) + 1];
- atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+ atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1];
+ atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1];
atomic_t obj_created[BINDER_STAT_COUNT];
atomic_t obj_deleted[BINDER_STAT_COUNT];
};
@@ -159,6 +159,8 @@ struct binder_work {
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+ BINDER_WORK_FROZEN_BINDER,
+ BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
} type;
};
@@ -275,6 +277,14 @@ struct binder_ref_death {
binder_uintptr_t cookie;
};
+struct binder_ref_freeze {
+ struct binder_work work;
+ binder_uintptr_t cookie;
+ bool is_frozen:1;
+ bool sent:1;
+ bool resend:1;
+};
+
/**
* struct binder_ref_data - binder_ref counts and id
* @debug_id: unique ID for the ref
@@ -307,6 +317,8 @@ struct binder_ref_data {
* @node indicates the node must be freed
* @death: pointer to death notification (ref_death) if requested
* (protected by @node->lock)
+ * @freeze: pointer to freeze notification (ref_freeze) if requested
+ * (protected by @node->lock)
*
* Structure to track references from procA to target node (on procB). This
* structure is unsafe to access without holding @proc->outer_lock.
@@ -323,6 +335,7 @@ struct binder_ref {
struct binder_proc *proc;
struct binder_node *node;
struct binder_ref_death *death;
+ struct binder_ref_freeze *freeze;
};
/**
@@ -368,12 +381,16 @@ struct binder_ref {
* @freeze_wait: waitqueue of processes waiting for all outstanding
* transactions to be processed
* (protected by @inner_lock)
+ * @dmap: dbitmap to manage available reference descriptors
+ * (protected by @outer_lock)
* @todo: list of work for this process
* (protected by @inner_lock)
* @stats: per-process binder statistics
* (atomics, no lock needed)
* @delivered_death: list of delivered death notification
* (protected by @inner_lock)
+ * @delivered_freeze: list of delivered freeze notification
+ * (protected by @inner_lock)
* @max_threads: cap on number of binder threads
* (protected by @inner_lock)
* @requested_threads: number of binder threads requested but not
@@ -417,11 +434,12 @@ struct binder_proc {
bool sync_recv;
bool async_recv;
wait_queue_head_t freeze_wait;
-
+ struct dbitmap dmap;
struct list_head todo;
struct binder_stats stats;
struct list_head delivered_death;
- int max_threads;
+ struct list_head delivered_freeze;
+ u32 max_threads;
int requested_threads;
int requested_threads_started;
int tmp_ref;
@@ -562,4 +580,12 @@ struct binder_object {
};
};
+/**
+ * binder_add_device() - Add a binder device to binder_devices
+ * @device: the new binder device to add to the global list
+ *
+ * Not reentrant as the list is not protected by any locks
+ */
+void binder_add_device(struct binder_device *device);
+
#endif /* _LINUX_BINDER_INTERNAL_H */
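
The br[] and bc[] stats arrays above are sized as _IOC_NR() of the last protocol code plus one, which is why adding BC_FREEZE_NOTIFICATION_DONE and BR_CLEAR_FREEZE_NOTIFICATION_DONE means resizing them. A small illustration of that sizing, using hypothetical command numbers rather than the real binder codes:

#include <stdio.h>
#include <linux/ioctl.h>

/* hypothetical codes, not the real binder protocol values */
#define DEMO_BC_FIRST _IO('c', 0)
#define DEMO_BC_LAST  _IOW('c', 20, unsigned long)

int main(void)
{
    unsigned int bc[_IOC_NR(DEMO_BC_LAST) + 1] = { 0 };

    bc[_IOC_NR(DEMO_BC_FIRST)]++;   /* slot 0 */
    bc[_IOC_NR(DEMO_BC_LAST)]++;    /* slot 20, the last one */
    printf("%zu counters\n", sizeof(bc) / sizeof(bc[0]));   /* prints 21 */
    return 0;
}
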
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index fe38c6fc65d0..16de1b9e72f7 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -328,7 +328,7 @@ TRACE_EVENT(binder_update_page_range,
TP_fast_assign(
__entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - alloc->buffer;
+ __entry->offset = start - alloc->vm_start;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 3001d754ac36..98da8c4eea59 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -58,6 +58,7 @@ enum binderfs_stats_mode {
struct binder_features {
bool oneway_spam_detection;
bool extended_error;
+ bool freeze_notification;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -74,6 +75,7 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = {
static struct binder_features binder_features = {
.oneway_spam_detection = true,
.extended_error = true,
+ .freeze_notification = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -185,7 +187,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode_lock(d_inode(root));
/* look it up */
- dentry = lookup_one_len(name, root, name_len);
+ dentry = lookup_noperm(&QSTR(name), root);
if (IS_ERR(dentry)) {
inode_unlock(d_inode(root));
ret = PTR_ERR(dentry);
@@ -205,6 +207,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
fsnotify_create(root->d_inode, dentry);
inode_unlock(d_inode(root));
+ binder_add_device(device);
+
return 0;
err:
@@ -270,6 +274,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);
if (refcount_dec_and_test(&device->ref)) {
+ hlist_del_init(&device->hlist);
kfree(device->context.name);
kfree(device);
}
@@ -482,7 +487,7 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent,
{
struct dentry *dentry;
- dentry = lookup_one_len(name, parent, strlen(name));
+ dentry = lookup_noperm(&QSTR(name), parent);
if (IS_ERR(dentry))
return dentry;
@@ -608,6 +613,12 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "freeze_notification",
+ &binder_features_fops,
+ &binder_features.freeze_notification);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
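
With the freeze_notification feature file added above, userspace can probe for support before issuing BC_REQUEST_FREEZE_NOTIFICATION. A minimal sketch, assuming the common /dev/binderfs mount point (system-dependent) and that the feature file reads back "1" when the feature is compiled in:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* assumed mount point; binderfs may be mounted elsewhere */
    const char *path = "/dev/binderfs/features/freeze_notification";
    char val = '0';
    int fd = open(path, O_RDONLY);

    if (fd < 0) {
        perror(path);
        return 1;
    }
    if (read(fd, &val, 1) == 1 && val == '1')
        printf("freeze notifications supported\n");
    else
        printf("freeze notifications not advertised\n");
    close(fd);
    return 0;
}
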
diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
new file mode 100644
index 000000000000..956f1bd087d1
--- /dev/null
+++ b/drivers/android/dbitmap.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2024 Google LLC
+ *
+ * dbitmap - dynamically sized bitmap library.
+ *
+ * Used by the binder driver to optimize the allocation of the smallest
+ * available descriptor ID. Each bit in the bitmap represents the state
+ * of an ID.
+ *
+ * A dbitmap can grow or shrink as needed. This part has been designed
+ * considering that users might need to briefly release their locks in
+ * order to allocate memory for the new bitmap. These operations are
+ * then verified to determine if the grow or shrink is still valid.
+ *
+ * This library does not provide protection against concurrent access
+ * by itself. Binder uses the proc->outer_lock for this purpose.
+ */
+
+#ifndef _LINUX_DBITMAP_H
+#define _LINUX_DBITMAP_H
+#include <linux/bitmap.h>
+
+#define NBITS_MIN BITS_PER_TYPE(unsigned long)
+
+struct dbitmap {
+ unsigned int nbits;
+ unsigned long *map;
+};
+
+static inline int dbitmap_enabled(struct dbitmap *dmap)
+{
+ return !!dmap->nbits;
+}
+
+static inline void dbitmap_free(struct dbitmap *dmap)
+{
+ dmap->nbits = 0;
+ kfree(dmap->map);
+}
+
+/* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
+static inline unsigned int dbitmap_shrink_nbits(struct dbitmap *dmap)
+{
+ unsigned int bit;
+
+ if (dmap->nbits <= NBITS_MIN)
+ return 0;
+
+ /*
+ * Determine if the bitmap can shrink based on the position of
+ * its last set bit. If the bit is within the first quarter of
+ * the bitmap then shrinking is possible. In this case, the
+ * bitmap should shrink to half its current size.
+ */
+ bit = find_last_bit(dmap->map, dmap->nbits);
+ if (bit < (dmap->nbits >> 2))
+ return dmap->nbits >> 1;
+
+ /* find_last_bit() returns dmap->nbits when no bits are set. */
+ if (bit == dmap->nbits)
+ return NBITS_MIN;
+
+ return 0;
+}
+
+/* Replace the internal bitmap with a new one of different size */
+static inline void
+dbitmap_replace(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ bitmap_copy(new, dmap->map, min(dmap->nbits, nbits));
+ kfree(dmap->map);
+ dmap->map = new;
+ dmap->nbits = nbits;
+}
+
+static inline void
+dbitmap_shrink(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ if (!new)
+ return;
+
+ /*
+ * Verify that shrinking to @nbits is still possible. The @new
+ * bitmap might have been allocated without locks, so this call
+ * could now be outdated. In this case, free @new and move on.
+ */
+ if (!dbitmap_enabled(dmap) || dbitmap_shrink_nbits(dmap) != nbits) {
+ kfree(new);
+ return;
+ }
+
+ dbitmap_replace(dmap, new, nbits);
+}
+
+/* Returns the nbits that a dbitmap can grow to. */
+static inline unsigned int dbitmap_grow_nbits(struct dbitmap *dmap)
+{
+ return dmap->nbits << 1;
+}
+
+static inline void
+dbitmap_grow(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ /*
+ * Verify that growing to @nbits is still possible. The @new
+ * bitmap might have been allocated without locks, so this call
+ * could now be outdated. In this case, free @new and move on.
+ */
+ if (!dbitmap_enabled(dmap) || nbits <= dmap->nbits) {
+ kfree(new);
+ return;
+ }
+
+ /*
+ * Check for ENOMEM after confirming the grow operation is still
+ * required. This ensures we only disable the dbitmap when it's
+ * necessary. Once the dbitmap is disabled, binder will fallback
+ * to slow_desc_lookup_olocked().
+ */
+ if (!new) {
+ dbitmap_free(dmap);
+ return;
+ }
+
+ dbitmap_replace(dmap, new, nbits);
+}
+
+/*
+ * Finds and sets the next zero bit in the bitmap. Upon success @bit
+ * is populated with the index and 0 is returned. Otherwise, -ENOSPC
+ * is returned to indicate that a dbitmap_grow() is needed.
+ */
+static inline int
+dbitmap_acquire_next_zero_bit(struct dbitmap *dmap, unsigned long offset,
+ unsigned long *bit)
+{
+ unsigned long n;
+
+ n = find_next_zero_bit(dmap->map, dmap->nbits, offset);
+ if (n == dmap->nbits)
+ return -ENOSPC;
+
+ *bit = n;
+ set_bit(n, dmap->map);
+
+ return 0;
+}
+
+static inline void
+dbitmap_clear_bit(struct dbitmap *dmap, unsigned long bit)
+{
+ clear_bit(bit, dmap->map);
+}
+
+static inline int dbitmap_init(struct dbitmap *dmap)
+{
+ dmap->map = bitmap_zalloc(NBITS_MIN, GFP_KERNEL);
+ if (!dmap->map) {
+ dmap->nbits = 0;
+ return -ENOMEM;
+ }
+
+ dmap->nbits = NBITS_MIN;
+
+ return 0;
+}
+#endif
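
The grow/shrink policy above (double when full, halve once the last set bit falls into the first quarter, never below NBITS_MIN) pairs with the lock-drop-and-retry in get_ref_desc_olocked(): the replacement bitmap is allocated without the lock held, and dbitmap_grow()/dbitmap_shrink() re-verify the operation before committing. A minimal userspace model of the policy itself, in plain C; dbitmap_model, acquire_desc() and release_desc() are invented names, and the bit-search helpers are naive loops rather than the kernel's find_next_zero_bit()/find_last_bit():

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

struct dbitmap_model {
    unsigned int nbits;
    unsigned long *map;
};

static int find_next_zero(const struct dbitmap_model *d, unsigned int from)
{
    for (unsigned int i = from; i < d->nbits; i++)
        if (!(d->map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
            return (int)i;
    return -1;
}

static int find_last_set(const struct dbitmap_model *d)
{
    for (int i = (int)d->nbits - 1; i >= 0; i--)
        if (d->map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
            return i;
    return -1;
}

static void resize(struct dbitmap_model *d, unsigned int nbits)
{
    unsigned long *new = calloc(nbits / BITS_PER_LONG, sizeof(unsigned long));

    memcpy(new, d->map, (nbits < d->nbits ? nbits : d->nbits) / CHAR_BIT);
    free(d->map);
    d->map = new;
    d->nbits = nbits;
}

/* smallest free descriptor >= offset; double the map when full,
 * like dbitmap_acquire_next_zero_bit() followed by dbitmap_grow() */
static unsigned int acquire_desc(struct dbitmap_model *d, unsigned int offset)
{
    int bit = find_next_zero(d, offset);

    if (bit < 0) {
        bit = (int)d->nbits;
        resize(d, d->nbits * 2);
    }
    d->map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
    return (unsigned int)bit;
}

/* clear a descriptor; halve the map once the last set bit falls into
 * the first quarter, mirroring dbitmap_shrink_nbits() */
static void release_desc(struct dbitmap_model *d, unsigned int bit)
{
    d->map[bit / BITS_PER_LONG] &= ~(1UL << (bit % BITS_PER_LONG));
    if (d->nbits > BITS_PER_LONG &&
        find_last_set(d) < (int)(d->nbits / 4))
        resize(d, d->nbits / 2);
}

int main(void)
{
    struct dbitmap_model d = {
        .nbits = BITS_PER_LONG,
        .map = calloc(1, sizeof(unsigned long)),
    };

    for (unsigned int i = 0; i < 100; i++)
        acquire_desc(&d, 1);    /* descriptor 0 is reserved for the ctx mgr */
    printf("nbits after 100 handles: %u\n", d.nbits);   /* 128 on 64-bit */

    for (unsigned int i = 100; i >= 3; i--)
        release_desc(&d, i);
    printf("nbits after releases: %u\n", d.nbits);      /* back to 64 */

    free(d.map);
    return 0;
}
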