tracing: Factorize the events profile accounting

Factorize the events enabling accounting in a common tracing core
helper. This reduces the size of the profile_enable() and
profile_disable() callbacks for each trace event.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Frederic Weisbecker 2009-09-18 00:54:43 +02:00
parent 0efb4d2072
commit e5e25cf47b
4 changed files with 36 additions and 40 deletions

View File

@@ -130,8 +130,8 @@ struct ftrace_event_call {
void *data;
atomic_t profile_count;
int (*profile_enable)(struct ftrace_event_call *);
void (*profile_disable)(struct ftrace_event_call *);
int (*profile_enable)(void);
void (*profile_disable)(void);
};
#define MAX_FILTER_PRED 32

View File

@@ -100,33 +100,25 @@ struct perf_counter_attr;
#ifdef CONFIG_EVENT_PROFILE
#define TRACE_SYS_ENTER_PROFILE(sname) \
static int prof_sysenter_enable_##sname(struct ftrace_event_call *event_call) \
static int prof_sysenter_enable_##sname(void) \
{ \
int ret = 0; \
if (!atomic_inc_return(&event_enter_##sname.profile_count)) \
ret = reg_prof_syscall_enter("sys"#sname); \
return ret; \
return reg_prof_syscall_enter("sys"#sname); \
} \
\
static void prof_sysenter_disable_##sname(struct ftrace_event_call *event_call)\
static void prof_sysenter_disable_##sname(void) \
{ \
if (atomic_add_negative(-1, &event_enter_##sname.profile_count)) \
unreg_prof_syscall_enter("sys"#sname); \
unreg_prof_syscall_enter("sys"#sname); \
}
#define TRACE_SYS_EXIT_PROFILE(sname) \
static int prof_sysexit_enable_##sname(struct ftrace_event_call *event_call) \
static int prof_sysexit_enable_##sname(void) \
{ \
int ret = 0; \
if (!atomic_inc_return(&event_exit_##sname.profile_count)) \
ret = reg_prof_syscall_exit("sys"#sname); \
return ret; \
return reg_prof_syscall_exit("sys"#sname); \
} \
\
static void prof_sysexit_disable_##sname(struct ftrace_event_call *event_call) \
static void prof_sysexit_disable_##sname(void) \
{ \
if (atomic_add_negative(-1, &event_exit_##sname.profile_count)) \
unreg_prof_syscall_exit("sys"#sname); \
unreg_prof_syscall_exit("sys"#sname); \
}
#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \

View File

@@ -382,20 +382,14 @@ static inline int ftrace_get_offsets_##call( \
*
* NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
*
* static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
* static int ftrace_profile_enable_<call>(void)
* {
* int ret = 0;
*
* if (!atomic_inc_return(&event_call->profile_count))
* ret = register_trace_<call>(ftrace_profile_<call>);
*
* return ret;
* return register_trace_<call>(ftrace_profile_<call>);
* }
*
* static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
* static void ftrace_profile_disable_<call>(void)
* {
* if (atomic_add_negative(-1, &event->call->profile_count))
* unregister_trace_<call>(ftrace_profile_<call>);
* unregister_trace_<call>(ftrace_profile_<call>);
* }
*
*/
@@ -405,20 +399,14 @@ static inline int ftrace_get_offsets_##call( \
\
static void ftrace_profile_##call(proto); \
\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
static int ftrace_profile_enable_##call(void) \
{ \
int ret = 0; \
\
if (!atomic_inc_return(&event_call->profile_count)) \
ret = register_trace_##call(ftrace_profile_##call); \
\
return ret; \
return register_trace_##call(ftrace_profile_##call); \
} \
\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
static void ftrace_profile_disable_##call(void) \
{ \
if (atomic_add_negative(-1, &event_call->profile_count)) \
unregister_trace_##call(ftrace_profile_##call); \
unregister_trace_##call(ftrace_profile_##call); \
}
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

View File

@@ -8,6 +8,14 @@
#include <linux/module.h>
#include "trace.h"
/*
 * Take a profiling reference on @event and register its profile handler
 * only on the first reference.  atomic_inc_return() yields 0 exactly when
 * the count transitions to 0 (profile_count is presumably initialized to
 * -1, matching the old per-event macros which registered on
 * !atomic_inc_return() -- TODO confirm against the initializer).
 * Returns 0 if already enabled, otherwise the result of the event's
 * profile_enable() callback.
 */
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
/* Already enabled by a previous caller: just account the reference. */
if (atomic_inc_return(&event->profile_count))
return 0;
return event->profile_enable();
}
int ftrace_profile_enable(int event_id)
{
struct ftrace_event_call *event;
@@ -17,7 +25,7 @@ int ftrace_profile_enable(int event_id)
list_for_each_entry(event, &ftrace_events, list) {
if (event->id == event_id && event->profile_enable &&
try_module_get(event->mod)) {
ret = event->profile_enable(event);
ret = ftrace_profile_enable_event(event);
break;
}
}
@@ -26,6 +34,14 @@ int ftrace_profile_enable(int event_id)
return ret;
}
/*
 * Drop a profiling reference on @event and invoke its profile_disable()
 * callback only when the last reference goes away.
 * atomic_add_negative(-1, ...) is true exactly when the decremented count
 * becomes negative, i.e. the count returns to its idle value -- mirroring
 * the registration condition in ftrace_profile_enable_event().
 */
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
/* Other users still hold references: keep the handler registered. */
if (!atomic_add_negative(-1, &event->profile_count))
return;
event->profile_disable();
}
void ftrace_profile_disable(int event_id)
{
struct ftrace_event_call *event;
@@ -33,7 +49,7 @@ void ftrace_profile_disable(int event_id)
mutex_lock(&event_mutex);
list_for_each_entry(event, &ftrace_events, list) {
if (event->id == event_id) {
event->profile_disable(event);
ftrace_profile_disable_event(event);
module_put(event->mod);
break;
}