author	Carlos Llamas <cmllamas@google.com>	2023-12-01 17:21:50 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-12-05 09:23:40 +0900
commit	de0e6573125f8ea7a01a9b05a45b0c73116c73b2 (patch)
tree	add3fbc05ce90f3c23663b8c15385ece392a3c24 /drivers/android
parent	258ce20ede33c551002705fa1488864fb287752c (diff)
binder: make oversized buffer code more readable
The sections in binder_alloc_new_buf_locked() dealing with oversized
buffers are scattered which makes them difficult to read. Instead,
consolidate this code into a single block to improve readability.

No functional change here.

Signed-off-by: Carlos Llamas <cmllamas@google.com>
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-22-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
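For readers unfamiliar with the allocator, here is a minimal, self-contained C sketch of the pattern the consolidated block implements: a best-fit search over free chunks followed by a single block that splits an oversized chunk and returns the remainder to the free list. The names used here (struct chunk, alloc_chunk(), the malloc'd arena) are hypothetical and exist only for illustration; the real code operates on struct binder_buffer entries kept in an rbtree and reinserts the remainder with binder_insert_free_buffer(), as the diff below shows.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical chunk header for a toy best-fit allocator (not binder). */
struct chunk {
	size_t size;		/* payload bytes available after the header */
	int free;		/* nonzero while the chunk is unallocated   */
	struct chunk *next;	/* all chunks, in address order             */
};

/*
 * Best-fit allocation with the oversized-chunk handling kept in one
 * block, mirroring the shape of the consolidated code in this patch.
 */
static struct chunk *alloc_chunk(struct chunk *head, size_t size)
{
	struct chunk *best = NULL;
	struct chunk *c;

	size = (size + 7) & ~(size_t)7;	/* keep split headers 8-byte aligned */

	/* Best fit: the smallest free chunk that still satisfies the request. */
	for (c = head; c; c = c->next)
		if (c->free && c->size >= size && (!best || c->size < best->size))
			best = c;
	if (!best)
		return NULL;		/* out of space */

	if (best->size > size + sizeof(struct chunk)) {
		/* Found an oversized chunk: split the tail off as a new free chunk. */
		struct chunk *rest = (struct chunk *)((char *)(best + 1) + size);

		rest->size = best->size - size - sizeof(struct chunk);
		rest->free = 1;
		rest->next = best->next;
		best->next = rest;
		best->size = size;
	}

	best->free = 0;			/* hand the (possibly trimmed) chunk out */
	return best;
}

int main(void)
{
	/* One arena-sized free chunk; malloc() returns suitably aligned memory. */
	size_t arena_size = 4096;
	struct chunk *head = malloc(arena_size);

	if (!head)
		return 1;
	head->size = arena_size - sizeof(struct chunk);
	head->free = 1;
	head->next = NULL;

	struct chunk *a = alloc_chunk(head, 100);
	struct chunk *b = alloc_chunk(head, 2000);

	printf("a: %p (%zu bytes)\n", (void *)a, a ? a->size : 0);
	printf("b: %p (%zu bytes)\n", (void *)b, b ? b->size : 0);
	free(head);
	return 0;
}

The binder version differs in the details (the free buffers live in an rbtree, so the winner is removed with rb_erase() and the remainder reinserted with binder_insert_free_buffer()), but the consolidated "found oversized, split in place" shape is the same.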
Diffstat (limited to 'drivers/android')
-rw-r--r--	drivers/android/binder_alloc.c	21
1 file changed, 10 insertions, 11 deletions
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index c9292eee8fee..ad9b73c6ddb7 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -483,32 +483,31 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		goto out;
 	}
 
-	if (n == NULL) {
+	if (buffer_size != size) {
+		/* Found an oversized buffer and needs to be split */
 		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+		WARN_ON(n || buffer_size == size);
+		new_buffer->user_data = buffer->user_data + size;
+		list_add(&new_buffer->entry, &buffer->entry);
+		new_buffer->free = 1;
+		binder_insert_free_buffer(alloc, new_buffer);
+		new_buffer = NULL;
 	}
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
 		      alloc->pid, size, buffer, buffer_size);
 
-	WARN_ON(n && buffer_size != size);
-
 	has_page_addr = (buffer->user_data + buffer_size) & PAGE_MASK;
 	end_page_addr = PAGE_ALIGN(buffer->user_data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
 	binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
 				   end_page_addr);
-	if (buffer_size != size) {
-		new_buffer->user_data = buffer->user_data + size;
-		list_add(&new_buffer->entry, &buffer->entry);
-		new_buffer->free = 1;
-		binder_insert_free_buffer(alloc, new_buffer);
-		new_buffer = NULL;
-	}
 
-	rb_erase(best_fit, &alloc->free_buffers);
+	rb_erase(&buffer->rb_node, &alloc->free_buffers);
 	buffer->free = 0;
 	buffer->allow_user_free = 0;
 	binder_insert_allocated_buffer_locked(alloc, buffer);