Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
commit 6f5e3577d4
kernel/trace/ring_buffer.c

@@ -473,7 +473,7 @@ struct ring_buffer_per_cpu {
 	int				nr_pages_to_update;
 	struct list_head		new_pages; /* new pages to add */
 	struct work_struct		update_pages_work;
-	struct completion		update_completion;
+	struct completion		update_done;
 };
 
 struct ring_buffer {
@@ -1058,7 +1058,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
-	init_completion(&cpu_buffer->update_completion);
+	init_completion(&cpu_buffer->update_done);
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -1461,7 +1461,7 @@ static void update_pages_handler(struct work_struct *work)
 	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
 			struct ring_buffer_per_cpu, update_pages_work);
 	rb_update_pages(cpu_buffer);
-	complete(&cpu_buffer->update_completion);
+	complete(&cpu_buffer->update_done);
 }
 
 /**
@@ -1534,39 +1534,29 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 		get_online_cpus();
 		/*
 		 * Fire off all the required work handlers
-		 * Look out for offline CPUs
-		 */
-		for_each_buffer_cpu(buffer, cpu) {
-			cpu_buffer = buffer->buffers[cpu];
-			if (!cpu_buffer->nr_pages_to_update ||
-			    !cpu_online(cpu))
-				continue;
-
-			schedule_work_on(cpu, &cpu_buffer->update_pages_work);
-		}
-		/*
-		 * This loop is for the CPUs that are not online.
-		 * We can't schedule anything on them, but it's not necessary
+		 * We can't schedule on offline CPUs, but it's not necessary
 		 * since we can change their buffer sizes without any race.
 		 */
 		for_each_buffer_cpu(buffer, cpu) {
 			cpu_buffer = buffer->buffers[cpu];
-			if (!cpu_buffer->nr_pages_to_update ||
-			    cpu_online(cpu))
+			if (!cpu_buffer->nr_pages_to_update)
 				continue;
 
-			rb_update_pages(cpu_buffer);
+			if (cpu_online(cpu))
+				schedule_work_on(cpu,
+						&cpu_buffer->update_pages_work);
+			else
+				rb_update_pages(cpu_buffer);
 		}
 
 		/* wait for all the updates to complete */
 		for_each_buffer_cpu(buffer, cpu) {
 			cpu_buffer = buffer->buffers[cpu];
-			if (!cpu_buffer->nr_pages_to_update ||
-			    !cpu_online(cpu))
+			if (!cpu_buffer->nr_pages_to_update)
 				continue;
 
-			wait_for_completion(&cpu_buffer->update_completion);
-			/* reset this value */
+			if (cpu_online(cpu))
+				wait_for_completion(&cpu_buffer->update_done);
 			cpu_buffer->nr_pages_to_update = 0;
 		}
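The resize hunk above replaces the two separate online/offline loops with one dispatch pass followed by a wait pass: work is scheduled on each online CPU, offline CPUs are updated directly (their buffers can be changed without racing), and update_done is only waited on where a worker was actually scheduled. Below is a minimal sketch of that dispatch-then-wait pattern using the same workqueue and completion primitives; the names per_cpu_state, do_update, update_work_fn and resize_all are hypothetical, and this is not the ring-buffer code itself.

/*
 * Hypothetical sketch of the dispatch-then-wait pattern used by
 * ring_buffer_resize() above; not the actual ring-buffer code.
 */
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/kernel.h>

struct per_cpu_state {
	int			pending;	/* like nr_pages_to_update */
	struct work_struct	work;		/* like update_pages_work */
	struct completion	done;		/* like update_done */
};

static struct per_cpu_state state[NR_CPUS];

/* the update itself, callable directly (mirrors rb_update_pages()) */
static void do_update(struct per_cpu_state *s)
{
	/* ... resize this CPU's buffer ... */
}

/* work handler run on the target CPU (mirrors update_pages_handler()) */
static void update_work_fn(struct work_struct *work)
{
	struct per_cpu_state *s = container_of(work, struct per_cpu_state, work);

	do_update(s);
	complete(&s->done);
}

/*
 * INIT_WORK(&state[cpu].work, update_work_fn) and
 * init_completion(&state[cpu].done) are assumed to have run already.
 */
static void resize_all(void)
{
	int cpu;

	get_online_cpus();

	/* single pass: schedule on online CPUs, update offline CPUs inline */
	for_each_possible_cpu(cpu) {
		if (!state[cpu].pending)
			continue;
		if (cpu_online(cpu))
			schedule_work_on(cpu, &state[cpu].work);
		else
			do_update(&state[cpu]);
	}

	/* wait only where a worker was actually scheduled, then reset */
	for_each_possible_cpu(cpu) {
		if (!state[cpu].pending)
			continue;
		if (cpu_online(cpu))
			wait_for_completion(&state[cpu].done);
		state[cpu].pending = 0;
	}

	put_online_cpus();
}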
@@ -1593,13 +1583,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 		if (cpu_online(cpu_id)) {
 			schedule_work_on(cpu_id,
 					 &cpu_buffer->update_pages_work);
-			wait_for_completion(&cpu_buffer->update_completion);
+			wait_for_completion(&cpu_buffer->update_done);
 		} else
 			rb_update_pages(cpu_buffer);
 
-		put_online_cpus();
-		/* reset this value */
 		cpu_buffer->nr_pages_to_update = 0;
+		put_online_cpus();
 	}
 
  out:
kernel/trace/trace.c

@@ -763,8 +763,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
  * Register a new plugin tracer.
  */
 int register_tracer(struct tracer *type)
-__releases(kernel_lock)
-__acquires(kernel_lock)
 {
 	struct tracer *t;
 	int ret = 0;
@@ -5114,7 +5112,8 @@ __init static int tracer_alloc_buffers(void)
 		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
-	set_buffer_entries(&global_trace, ring_buf_size);
+	set_buffer_entries(&global_trace,
+			   ring_buffer_size(global_trace.buffer, 0));
 #ifdef CONFIG_TRACER_MAX_TRACE
 	set_buffer_entries(&max_tr, 1);
 #endif
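The tracer_alloc_buffers() change above stops recording the requested ring_buf_size and instead reads back what the ring buffer actually allocated via ring_buffer_size(buffer, cpu). A small hedged illustration of that read-back, assuming a buffer created elsewhere; report_ring_buffer_size is a made-up name for this sketch.

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

/*
 * Hypothetical helper: report how large CPU 0's buffer really is,
 * rather than trusting the size that was requested.
 */
static void report_ring_buffer_size(struct ring_buffer *buf)
{
	/* bytes actually allocated for CPU 0's per-cpu buffer */
	unsigned long bytes = ring_buffer_size(buf, 0);

	pr_info("ring buffer cpu0: %lu bytes\n", bytes);
}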