Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--	drivers/android/binder_alloc.c | 46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index a3e56637db4f..a4a4dc87ba53 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -169,9 +169,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 {
 	struct binder_buffer *buffer;
 
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 	return buffer;
 }
 
@@ -597,10 +597,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	if (!next)
 		return ERR_PTR(-ENOMEM);
 
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
 	if (IS_ERR(buffer)) {
-		mutex_unlock(&alloc->mutex);
+		spin_unlock(&alloc->lock);
 		goto out;
 	}
 
@@ -608,7 +608,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	buffer->offsets_size = offsets_size;
 	buffer->extra_buffers_size = extra_buffers_size;
 	buffer->pid = current->tgid;
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 
 	ret = binder_install_buffer_pages(alloc, buffer, size);
 	if (ret) {
@@ -785,17 +785,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 	 * We could eliminate the call to binder_alloc_clear_buf()
 	 * from binder_alloc_deferred_release() by moving this to
 	 * binder_free_buf_locked(). However, that could
-	 * increase contention for the alloc mutex if clear_on_free
-	 * is used frequently for large buffers. The mutex is not
+	 * increase contention for the alloc->lock if clear_on_free
+	 * is used frequently for large buffers. This lock is not
 	 * needed for correctness here.
 	 */
 	if (buffer->clear_on_free) {
 		binder_alloc_clear_buf(alloc, buffer);
 		buffer->clear_on_free = false;
 	}
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	binder_free_buf_locked(alloc, buffer);
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 }
 
 /**
@@ -893,7 +893,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	struct binder_buffer *buffer;
 
 	buffers = 0;
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	BUG_ON(alloc->vma);
 
 	while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -943,7 +943,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		}
 		kfree(alloc->pages);
 	}
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 	if (alloc->mm)
 		mmdrop(alloc->mm);
 
@@ -966,7 +966,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
 	struct binder_buffer *buffer;
 	struct rb_node *n;
 
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
@@ -976,7 +976,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
 			   buffer->extra_buffers_size,
 			   buffer->transaction ? "active" : "delivered");
 	}
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 }
 
 /**
@@ -993,7 +993,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 	int lru = 0;
 	int free = 0;
 
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	/*
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
@@ -1009,7 +1009,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 				lru++;
 		}
 	}
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
 }
@@ -1025,10 +1025,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
 	struct rb_node *n;
 	int count = 0;
 
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
 		count++;
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 	return count;
 }
 
@@ -1073,8 +1073,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		goto err_mmget;
 	if (!mmap_read_trylock(mm))
 		goto err_mmap_read_lock_failed;
-	if (!mutex_trylock(&alloc->mutex))
-		goto err_get_alloc_mutex_failed;
+	if (!spin_trylock(&alloc->lock))
+		goto err_get_alloc_lock_failed;
 	if (!page->page_ptr)
 		goto err_page_already_freed;
 
@@ -1093,7 +1093,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	trace_binder_unmap_kernel_end(alloc, index);
 
 	list_lru_isolate(lru, item);
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 	spin_unlock(lock);
 
 	if (vma) {
@@ -1113,8 +1113,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 err_invalid_vma:
 err_page_already_freed:
-	mutex_unlock(&alloc->mutex);
-err_get_alloc_mutex_failed:
+	spin_unlock(&alloc->lock);
+err_get_alloc_lock_failed:
 	mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
 	mmput_async(mm);
@@ -1149,7 +1149,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	alloc->pid = current->group_leader->pid;
 	alloc->mm = current->mm;
 	mmgrab(alloc->mm);
-	mutex_init(&alloc->mutex);
+	spin_lock_init(&alloc->lock);
 	INIT_LIST_HEAD(&alloc->buffers);
 }
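
These hunks only compile if the lock member of struct binder_alloc changes type to match; that declaration lives in binder_alloc.h, which is outside this diffstat (limited to binder_alloc.c). Below is a minimal sketch of what the companion header change would look like, assuming the member is simply renamed from mutex to lock as the call sites above imply; it is illustrative only and not part of this diff.

	/* drivers/android/binder_alloc.h -- illustrative sketch, not part of this diff */
	#include <linux/spinlock.h>

	struct binder_alloc {
		spinlock_t lock;	/* was: struct mutex mutex; */
		/* ... remaining members unchanged ... */
	};

A spinlock section must not sleep, which matches how the hunks above keep potentially blocking work outside the critical sections: binder_alloc_clear_buf() runs before alloc->lock is taken, binder_install_buffer_pages() runs after it is dropped, and the shrinker callback uses spin_trylock() so it can bail out rather than wait.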