ftrace: Use synchronize_rcu_tasks_rude() instead of ftrace_sync()
This commit replaces the schedule_on_each_cpu(ftrace_sync) instances with synchronize_rcu_tasks_rude().

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
[ paulmck: Make Kconfig adjustments noted by kbuild test robot. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
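In short, the change swaps one synchronization primitive for another: instead of forcing a context switch on every CPU by scheduling an empty work item, ftrace now waits for a rude RCU-tasks grace period, which also covers CPUs that are idle or executing in userspace. Below is a minimal sketch of the before/after pattern; the wait_for_old_ops_*() wrapper names are illustrative only and are not part of the patch.

#include <linux/workqueue.h>
#include <linux/rcupdate.h>

/*
 * Before: an intentionally empty work function. Running it on every CPU
 * via schedule_on_each_cpu() forces each CPU through a context switch,
 * which is the actual synchronization.
 */
static void ftrace_sync(struct work_struct *work)
{
}

static void wait_for_old_ops_before(void)	/* illustrative wrapper */
{
	schedule_on_each_cpu(ftrace_sync);
}

/*
 * After: a single rude RCU-tasks grace period provides the same
 * guarantee, including for CPUs that are idle or running in userspace,
 * without queueing per-CPU work. This requires CONFIG_TASKS_RUDE_RCU,
 * hence the Kconfig hunk below.
 */
static void wait_for_old_ops_after(void)	/* illustrative wrapper */
{
	synchronize_rcu_tasks_rude();
}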
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -158,6 +158,7 @@ config FUNCTION_TRACER
 	select CONTEXT_SWITCH_TRACER
 	select GLOB
 	select TASKS_RCU if PREEMPTION
+	select TASKS_RUDE_RCU
 	help
 	  Enable the kernel to trace every kernel function. This is done
 	  by using a compiler feature to insert a small, 5-byte No-Operation
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -160,17 +160,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 	op->saved_func(ip, parent_ip, op, regs);
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-	/*
-	 * This function is just a stub to implement a hard force
-	 * of synchronize_rcu(). This requires synchronizing
-	 * tasks even in userspace and idle.
-	 *
-	 * Yes, function tracing is rude.
-	 */
-}
-
 static void ftrace_sync_ipi(void *data)
 {
 	/* Probably not needed, but do it anyway */
@@ -256,7 +245,7 @@ static void update_ftrace_function(void)
 	 * Make sure all CPUs see this. Yes this is slow, but static
 	 * tracing is slow and nasty to have enabled.
 	 */
-	schedule_on_each_cpu(ftrace_sync);
+	synchronize_rcu_tasks_rude();
 	/* Now all cpus are using the list ops. */
 	function_trace_op = set_function_trace_op;
 	/* Make sure the function_trace_op is visible on all CPUs */
@@ -2932,7 +2921,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 * infrastructure to do the synchronization, thus we must do it
 	 * ourselves.
 	 */
-	schedule_on_each_cpu(ftrace_sync);
+	synchronize_rcu_tasks_rude();
 
 	/*
 	 * When the kernel is preeptive, tasks can be preempted
@@ -5887,7 +5876,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 	 * infrastructure to do the synchronization, thus we must do it
 	 * ourselves.
 	 */
-	schedule_on_each_cpu(ftrace_sync);
+	synchronize_rcu_tasks_rude();
 
 	free_ftrace_hash(old_hash);
 }