mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 14:11:52 +00:00
binder: remove pid param in binder_alloc_new_buf()
Binder attributes the buffer allocation to the current->tgid every time. There is no need to pass this as a parameter so drop it. Also add a few touchups to follow the coding guidelines. No functional changes are introduced in this patch. Reviewed-by: Alice Ryhl <aliceryhl@google.com> Signed-off-by: Carlos Llamas <cmllamas@google.com> Link: https://lore.kernel.org/r/20231201172212.1813387-13-cmllamas@google.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
377e1684db
commit
89f71743bf
@@ -3225,7 +3225,7 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
|
||||
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
|
||||
tr->offsets_size, extra_buffers_size,
|
||||
!reply && (t->flags & TF_ONE_WAY), current->tgid);
|
||||
!reply && (t->flags & TF_ONE_WAY));
|
||||
if (IS_ERR(t->buffer)) {
|
||||
char *s;
|
||||
|
||||
|
@@ -319,7 +319,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
|
||||
return smp_load_acquire(&alloc->vma);
|
||||
}
|
||||
|
||||
static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
|
||||
static bool debug_low_async_space_locked(struct binder_alloc *alloc)
|
||||
{
|
||||
/*
|
||||
* Find the amount and size of buffers allocated by the current caller;
|
||||
@@ -328,10 +328,11 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
|
||||
* and at some point we'll catch them in the act. This is more efficient
|
||||
* than keeping a map per pid.
|
||||
*/
|
||||
struct rb_node *n;
|
||||
struct binder_buffer *buffer;
|
||||
size_t total_alloc_size = 0;
|
||||
int pid = current->tgid;
|
||||
size_t num_buffers = 0;
|
||||
struct rb_node *n;
|
||||
|
||||
for (n = rb_first(&alloc->allocated_buffers); n != NULL;
|
||||
n = rb_next(n)) {
|
||||
@@ -364,8 +365,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
|
||||
static struct binder_buffer *binder_alloc_new_buf_locked(
|
||||
struct binder_alloc *alloc,
|
||||
size_t size,
|
||||
int is_async,
|
||||
int pid)
|
||||
int is_async)
|
||||
{
|
||||
struct rb_node *n = alloc->free_buffers.rb_node;
|
||||
struct binder_buffer *buffer;
|
||||
@@ -476,7 +476,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
|
||||
"%d: binder_alloc_buf size %zd got %pK\n",
|
||||
alloc->pid, size, buffer);
|
||||
buffer->async_transaction = is_async;
|
||||
buffer->pid = pid;
|
||||
buffer->oneway_spam_suspect = false;
|
||||
if (is_async) {
|
||||
alloc->free_async_space -= size;
|
||||
@@ -489,7 +488,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
|
||||
* of async space left (which is less than 10% of total
|
||||
* buffer size).
|
||||
*/
|
||||
buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
|
||||
buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc);
|
||||
} else {
|
||||
alloc->oneway_spam_detected = false;
|
||||
}
|
||||
@@ -532,7 +531,6 @@ static inline size_t sanitized_size(size_t data_size,
|
||||
* @offsets_size: user specified buffer offset
|
||||
* @extra_buffers_size: size of extra space for meta-data (eg, security context)
|
||||
* @is_async: buffer for async transaction
|
||||
* @pid: pid to attribute allocation to (used for debugging)
|
||||
*
|
||||
* Allocate a new buffer given the requested sizes. Returns
|
||||
* the kernel version of the buffer pointer. The size allocated
|
||||
@@ -545,8 +543,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
|
||||
size_t data_size,
|
||||
size_t offsets_size,
|
||||
size_t extra_buffers_size,
|
||||
int is_async,
|
||||
int pid)
|
||||
int is_async)
|
||||
{
|
||||
struct binder_buffer *buffer;
|
||||
size_t size;
|
||||
@@ -569,7 +566,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
|
||||
}
|
||||
|
||||
mutex_lock(&alloc->mutex);
|
||||
buffer = binder_alloc_new_buf_locked(alloc, size, is_async, pid);
|
||||
buffer = binder_alloc_new_buf_locked(alloc, size, is_async);
|
||||
if (IS_ERR(buffer)) {
|
||||
mutex_unlock(&alloc->mutex);
|
||||
goto out;
|
||||
@@ -578,6 +575,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
|
||||
buffer->data_size = data_size;
|
||||
buffer->offsets_size = offsets_size;
|
||||
buffer->extra_buffers_size = extra_buffers_size;
|
||||
buffer->pid = current->tgid;
|
||||
mutex_unlock(&alloc->mutex);
|
||||
|
||||
out:
|
||||
|
@@ -49,15 +49,13 @@ struct binder_buffer {
|
||||
unsigned async_transaction:1;
|
||||
unsigned oneway_spam_suspect:1;
|
||||
unsigned debug_id:27;
|
||||
|
||||
struct binder_transaction *transaction;
|
||||
|
||||
struct binder_node *target_node;
|
||||
size_t data_size;
|
||||
size_t offsets_size;
|
||||
size_t extra_buffers_size;
|
||||
unsigned long user_data;
|
||||
int pid;
|
||||
int pid;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -125,8 +123,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
|
||||
size_t data_size,
|
||||
size_t offsets_size,
|
||||
size_t extra_buffers_size,
|
||||
int is_async,
|
||||
int pid);
|
||||
int is_async);
|
||||
void binder_alloc_init(struct binder_alloc *alloc);
|
||||
int binder_alloc_shrinker_init(void);
|
||||
void binder_alloc_shrinker_exit(void);
|
||||
|
@@ -119,7 +119,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BUFFER_NUM; i++) {
|
||||
buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
|
||||
buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
|
||||
if (IS_ERR(buffers[i]) ||
|
||||
!check_buffer_pages_allocated(alloc, buffers[i],
|
||||
sizes[i])) {
|
||||
|
Loading…
Reference in New Issue
Block a user