author     Carlos Llamas <cmllamas@google.com>              2023-12-01 17:21:41 +0000
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-12-05 09:23:39 +0900
commit     89f71743bf42217dd4092fda703a8e4f6f4e55ac
tree       97c3b051c5cd6fad7b769c389cdd99698bf4c2c4  /drivers/android/binder_alloc.c
parent     377e1684db7a1e23261f3c3ebf76523c0554d512
binder: remove pid param in binder_alloc_new_buf()
Binder attributes the buffer allocation to current->tgid every time, so
there is no need to pass the pid as a parameter; drop it. Also add a few
touchups to follow the coding guidelines. No functional changes are
introduced in this patch.

Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-13-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
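For quick reference, here is a sketch of the allocator entry point before and
after this patch, reconstructed from the hunks below. The matching prototype
update in binder_alloc.h and the adjusted callers in binder.c are outside this
diffstat, so the declarations here are illustrative rather than quoted:

    /* Before: callers attributed the allocation explicitly via @pid. */
    struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                               size_t data_size,
                                               size_t offsets_size,
                                               size_t extra_buffers_size,
                                               int is_async,
                                               int pid);

    /*
     * After: the allocator records current->tgid itself, so the extra
     * argument disappears from every call site.
     */
    struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                               size_t data_size,
                                               size_t offsets_size,
                                               size_t extra_buffers_size,
                                               int is_async);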
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--  drivers/android/binder_alloc.c | 18
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 40a2ca0c0dea..b5c3e56318e1 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -319,7 +319,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
return smp_load_acquire(&alloc->vma);
}
-static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{
/*
* Find the amount and size of buffers allocated by the current caller;
@@ -328,10 +328,11 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
* and at some point we'll catch them in the act. This is more efficient
* than keeping a map per pid.
*/
- struct rb_node *n;
struct binder_buffer *buffer;
size_t total_alloc_size = 0;
+ int pid = current->tgid;
size_t num_buffers = 0;
+ struct rb_node *n;
for (n = rb_first(&alloc->allocated_buffers); n != NULL;
n = rb_next(n)) {
@@ -364,8 +365,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t size,
- int is_async,
- int pid)
+ int is_async)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -476,7 +476,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
"%d: binder_alloc_buf size %zd got %pK\n",
alloc->pid, size, buffer);
buffer->async_transaction = is_async;
- buffer->pid = pid;
buffer->oneway_spam_suspect = false;
if (is_async) {
alloc->free_async_space -= size;
@@ -489,7 +488,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
* of async space left (which is less than 10% of total
* buffer size).
*/
- buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
+ buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc);
} else {
alloc->oneway_spam_detected = false;
}
@@ -532,7 +531,6 @@ static inline size_t sanitized_size(size_t data_size,
* @offsets_size: user specified buffer offset
* @extra_buffers_size: size of extra space for meta-data (eg, security context)
* @is_async: buffer for async transaction
- * @pid: pid to attribute allocation to (used for debugging)
*
* Allocate a new buffer given the requested sizes. Returns
* the kernel version of the buffer pointer. The size allocated
@@ -545,8 +543,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async,
- int pid)
+ int is_async)
{
struct binder_buffer *buffer;
size_t size;
@@ -569,7 +566,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
}
mutex_lock(&alloc->mutex);
- buffer = binder_alloc_new_buf_locked(alloc, size, is_async, pid);
+ buffer = binder_alloc_new_buf_locked(alloc, size, is_async);
if (IS_ERR(buffer)) {
mutex_unlock(&alloc->mutex);
goto out;
@@ -578,6 +575,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
+ buffer->pid = current->tgid;
mutex_unlock(&alloc->mutex);
out:
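Read together, the tail of binder_alloc_new_buf() with the patch applied looks
roughly like this (reconstructed from the last two hunks above; the unchanged
lines between the two hunks are omitted):

    mutex_lock(&alloc->mutex);
    buffer = binder_alloc_new_buf_locked(alloc, size, is_async);
    if (IS_ERR(buffer)) {
            mutex_unlock(&alloc->mutex);
            goto out;
    }

    buffer->data_size = data_size;
    buffer->offsets_size = offsets_size;
    buffer->extra_buffers_size = extra_buffers_size;
    buffer->pid = current->tgid;    /* attribution derived here, no longer passed in */
    mutex_unlock(&alloc->mutex);
out: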