Diffstat (limited to 'drivers/android/binder.c')
-rw-r--r-- | drivers/android/binder.c | 112 |
1 files changed, 99 insertions, 13 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b21a7b246a0d..0c2161b1f057 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1045,6 +1045,66 @@ static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
 	return NULL;
 }
 
+/* Find the smallest unused descriptor the "slow way" */
+static u32 slow_desc_lookup_olocked(struct binder_proc *proc)
+{
+	struct binder_ref *ref;
+	struct rb_node *n;
+	u32 desc;
+
+	desc = 1;
+	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
+		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+		if (ref->data.desc > desc)
+			break;
+		desc = ref->data.desc + 1;
+	}
+
+	return desc;
+}
+
+/*
+ * Find an available reference descriptor ID. The proc->outer_lock might
+ * be released in the process, in which case -EAGAIN is returned and the
+ * @desc should be considered invalid.
+ */
+static int get_ref_desc_olocked(struct binder_proc *proc,
+				struct binder_node *node,
+				u32 *desc)
+{
+	struct dbitmap *dmap = &proc->dmap;
+	unsigned long *new, bit;
+	unsigned int nbits;
+
+	/* 0 is reserved for the context manager */
+	if (node == proc->context->binder_context_mgr_node) {
+		*desc = 0;
+		return 0;
+	}
+
+	if (!dbitmap_enabled(dmap)) {
+		*desc = slow_desc_lookup_olocked(proc);
+		return 0;
+	}
+
+	if (dbitmap_acquire_first_zero_bit(dmap, &bit) == 0) {
+		*desc = bit;
+		return 0;
+	}
+
+	/*
+	 * The dbitmap is full and needs to grow. The proc->outer_lock
+	 * is briefly released to allocate the new bitmap safely.
+	 */
+	nbits = dbitmap_grow_nbits(dmap);
+	binder_proc_unlock(proc);
+	new = bitmap_zalloc(nbits, GFP_KERNEL);
+	binder_proc_lock(proc);
+	dbitmap_grow(dmap, new, nbits);
+
+	return -EAGAIN;
+}
+
 /**
  * binder_get_ref_for_node_olocked() - get the ref associated with given node
  * @proc:	binder_proc that owns the ref
@@ -1068,12 +1128,14 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
 					struct binder_node *node,
 					struct binder_ref *new_ref)
 {
-	struct binder_context *context = proc->context;
-	struct rb_node **p = &proc->refs_by_node.rb_node;
-	struct rb_node *parent = NULL;
 	struct binder_ref *ref;
-	struct rb_node *n;
+	struct rb_node *parent;
+	struct rb_node **p;
+	u32 desc;
 
+retry:
+	p = &proc->refs_by_node.rb_node;
+	parent = NULL;
 	while (*p) {
 		parent = *p;
 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
@@ -1088,6 +1150,10 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
 	if (!new_ref)
 		return NULL;
 
+	/* might release the proc->outer_lock */
+	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
+		goto retry;
+
 	binder_stats_created(BINDER_STAT_REF);
 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
 	new_ref->proc = proc;
@@ -1095,14 +1161,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
 	rb_link_node(&new_ref->rb_node_node, parent, p);
 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
 
-	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
-	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
-		ref = rb_entry(n, struct binder_ref, rb_node_desc);
-		if (ref->data.desc > new_ref->data.desc)
-			break;
-		new_ref->data.desc = ref->data.desc + 1;
-	}
-
+	new_ref->data.desc = desc;
 	p = &proc->refs_by_desc.rb_node;
 	while (*p) {
 		parent = *p;
@@ -1131,6 +1190,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
 
 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
 {
+	struct dbitmap *dmap = &ref->proc->dmap;
 	bool delete_node = false;
 
 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
@@ -1138,6 +1198,8 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
 		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
 		     ref->node->debug_id);
 
+	if (dbitmap_enabled(dmap))
+		dbitmap_clear_bit(dmap, ref->data.desc);
 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
 
@@ -1298,6 +1360,25 @@ static void binder_free_ref(struct binder_ref *ref)
 	kfree(ref);
 }
 
+/* shrink descriptor bitmap if needed */
+static void try_shrink_dmap(struct binder_proc *proc)
+{
+	unsigned long *new;
+	int nbits;
+
+	binder_proc_lock(proc);
+	nbits = dbitmap_shrink_nbits(&proc->dmap);
+	binder_proc_unlock(proc);
+
+	if (!nbits)
+		return;
+
+	new = bitmap_zalloc(nbits, GFP_KERNEL);
+	binder_proc_lock(proc);
+	dbitmap_shrink(&proc->dmap, new, nbits);
+	binder_proc_unlock(proc);
+}
+
 /**
  * binder_update_ref_for_handle() - inc/dec the ref for given handle
  * @proc:	proc containing the ref
@@ -1334,8 +1415,10 @@ static int binder_update_ref_for_handle(struct binder_proc *proc,
 		*rdata = ref->data;
 	binder_proc_unlock(proc);
 
-	if (delete_ref)
+	if (delete_ref) {
 		binder_free_ref(ref);
+		try_shrink_dmap(proc);
+	}
 	return ret;
 
 err_no_ref:
@@ -4931,6 +5014,7 @@ static void binder_free_proc(struct binder_proc *proc)
 	put_task_struct(proc->tsk);
 	put_cred(proc->cred);
 	binder_stats_deleted(BINDER_STAT_PROC);
+	dbitmap_free(&proc->dmap);
 	kfree(proc);
 }
 
@@ -5634,6 +5718,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
 	if (proc == NULL)
 		return -ENOMEM;
+
+	dbitmap_init(&proc->dmap);
 	spin_lock_init(&proc->inner_lock);
 	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
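Note on the growth path in get_ref_desc_olocked() above: a GFP_KERNEL allocation may sleep and so cannot happen while proc->outer_lock (a spinlock) is held, which is why the patch drops the lock, allocates the larger bitmap, retakes the lock, and returns -EAGAIN so the caller redoes its red-black tree walk (the tree may have changed while unlocked). The following standalone userspace sketch is only an illustration of that drop/allocate/relock/retry shape, not binder or dbitmap code; all names (desc_table, get_desc_locked) are hypothetical, a pthread mutex stands in for proc->outer_lock, and a byte array stands in for the dbitmap.

/* Hypothetical sketch of the drop-lock/allocate/retry pattern.
 * Not binder code; illustrative names only. */
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct desc_table {
	pthread_mutex_t lock;	/* stands in for proc->outer_lock */
	uint8_t *used;		/* stands in for the dbitmap */
	unsigned int nbits;
};

/* Called with t->lock held. Returns 0 and sets *desc, or -EAGAIN after
 * growing the table, in which case the caller must redo any lookups it
 * made under the lock and call again. */
static int get_desc_locked(struct desc_table *t, uint32_t *desc)
{
	unsigned int i, nbits;
	uint8_t *bigger;

	for (i = 0; i < t->nbits; i++) {
		if (!t->used[i]) {
			t->used[i] = 1;
			*desc = i;
			return 0;
		}
	}

	/* Table is full: drop the lock to allocate, then ask for a retry. */
	nbits = t->nbits * 2;
	pthread_mutex_unlock(&t->lock);
	bigger = calloc(nbits, 1);	/* allocation happens without the lock */
	pthread_mutex_lock(&t->lock);
	if (bigger && nbits > t->nbits) {
		memcpy(bigger, t->used, t->nbits);
		free(t->used);
		t->used = bigger;
		t->nbits = nbits;
	} else {
		free(bigger);		/* lost a race or allocation failed */
	}
	return -EAGAIN;
}

int main(void)
{
	struct desc_table t = { PTHREAD_MUTEX_INITIALIZER, calloc(4, 1), 4 };
	uint32_t desc;

	pthread_mutex_lock(&t.lock);
	while (get_desc_locked(&t, &desc) == -EAGAIN)
		;	/* a real caller would redo its rbtree walk here */
	pthread_mutex_unlock(&t.lock);
	printf("got descriptor %" PRIu32 "\n", desc);
	free(t.used);
	return 0;
}

Build with something like `cc -pthread sketch.c`. Returning -EAGAIN instead of growing under the lock keeps the sleeping allocation outside the critical section, at the cost of forcing the caller to revalidate whatever it looked up before the lock was dropped, which mirrors the retry label in binder_get_ref_for_node_olocked().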