tracing/kprobes: Use global event perf buffers in kprobe tracer
Use new percpu global event buffer instead of stack in kprobe tracer
while tracing through perf.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Frank Ch. Eigler <fche@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: K.Prasad <prasad@linux.vnet.ibm.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <20090925182011.10157.60140.stgit@omoto>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
parent d7a4b414ee
commit a1a138d05f
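For context, the handlers changed below stop carving a variable-length array out of the probe handler's stack and instead borrow a global per-cpu scratch buffer. A rough sketch of how such a buffer pair is allocated and published, modeled on the trace_event_profile.c code of the same era that introduced trace_profile_buf and trace_profile_buf_nmi (the typedef, helper name, and error handling here are illustrative assumptions, not part of this commit):

	/*
	 * Sketch only: modeled on the companion patch that introduced
	 * trace_profile_buf/trace_profile_buf_nmi.
	 */
	typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) profile_buf_t;

	char *trace_profile_buf;	/* scratch for process/irq context */
	char *trace_profile_buf_nmi;	/* separate scratch so NMIs can't clobber it */

	static int profile_buffers_alloc(void)	/* hypothetical helper */
	{
		char *buf;

		/* alloc_percpu() wants a type, not a size, hence profile_buf_t */
		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			return -ENOMEM;
		/* publish with RCU so probe handlers can test the pointer locklessly */
		rcu_assign_pointer(trace_profile_buf, buf);

		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			return -ENOMEM;	/* real code would also unwind the first buffer */
		rcu_assign_pointer(trace_profile_buf_nmi, buf);

		return 0;
	}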
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
@@ -1149,35 +1149,49 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry *entry;
-	int size, __size, i, pc;
+	struct trace_entry *ent;
+	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *raw_data;
 
 	local_save_flags(irq_flags);
 	pc = preempt_count();
 
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
+	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+		     "profile buffer not large enough"))
+		return 0;
 
-	do {
-		char raw_data[size];
-		struct trace_entry *ent;
-		/*
-		 * Zero dead bytes from alignment to avoid stack leak
-		 * to userspace
-		 */
-		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-		entry = (struct kprobe_trace_entry *)raw_data;
-		ent = &entry->ent;
+	/*
+	 * Protect the non nmi buffer
+	 * This also protects the rcu read side
+	 */
+	local_irq_save(irq_flags);
+	__cpu = smp_processor_id();
 
-		tracing_generic_entry_update(ent, irq_flags, pc);
-		ent->type = call->id;
-		entry->nargs = tp->nr_args;
-		entry->ip = (unsigned long)kp->addr;
-		for (i = 0; i < tp->nr_args; i++)
-			entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-		perf_tp_event(call->id, entry->ip, 1, entry, size);
-	} while (0);
+	if (in_nmi())
+		raw_data = rcu_dereference(trace_profile_buf_nmi);
+	else
+		raw_data = rcu_dereference(trace_profile_buf);
+
+	if (!raw_data)
+		goto end;
+
+	raw_data = per_cpu_ptr(raw_data, __cpu);
+	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	entry = (struct kprobe_trace_entry *)raw_data;
+	ent = &entry->ent;
+
+	tracing_generic_entry_update(ent, irq_flags, pc);
+	ent->type = call->id;
+	entry->nargs = tp->nr_args;
+	entry->ip = (unsigned long)kp->addr;
+	for (i = 0; i < tp->nr_args; i++)
+		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+	perf_tp_event(call->id, entry->ip, 1, entry, size);
+end:
+	local_irq_restore(irq_flags);
 	return 0;
 }
 
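The kretprobe hunk below applies the identical conversion, recording func/ret_ip instead of ip. Condensed, the fast path both hunks share looks like this (an illustrative sketch assembled from the diff above, not literal patch code):

	/*
	 * Disabling IRQs pins the handler to this CPU and, with classic
	 * (non-preemptible) RCU, also acts as the read-side critical
	 * section protecting the buffer pointers.
	 */
	local_irq_save(irq_flags);
	__cpu = smp_processor_id();

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)			/* no profiled event has allocated them yet */
		goto end;

	raw_data = per_cpu_ptr(raw_data, __cpu);
	/* zero the u64 alignment padding so stale bytes never reach userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	/* ... cast raw_data to the entry type, fill it, call perf_tp_event() ... */
end:
	local_irq_restore(irq_flags);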
@@ -1188,33 +1202,50 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry *entry;
-	int size, __size, i, pc;
+	struct trace_entry *ent;
+	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *raw_data;
 
 	local_save_flags(irq_flags);
 	pc = preempt_count();
 
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
+	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+		     "profile buffer not large enough"))
+		return 0;
 
-	do {
-		char raw_data[size];
-		struct trace_entry *ent;
+	/*
+	 * Protect the non nmi buffer
+	 * This also protects the rcu read side
+	 */
+	local_irq_save(irq_flags);
+	__cpu = smp_processor_id();
 
-		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-		entry = (struct kretprobe_trace_entry *)raw_data;
-		ent = &entry->ent;
+	if (in_nmi())
+		raw_data = rcu_dereference(trace_profile_buf_nmi);
+	else
+		raw_data = rcu_dereference(trace_profile_buf);
+
+	if (!raw_data)
+		goto end;
 
-		tracing_generic_entry_update(ent, irq_flags, pc);
-		ent->type = call->id;
-		entry->nargs = tp->nr_args;
-		entry->func = (unsigned long)tp->rp.kp.addr;
-		entry->ret_ip = (unsigned long)ri->ret_addr;
-		for (i = 0; i < tp->nr_args; i++)
-			entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-		perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
-	} while (0);
+	raw_data = per_cpu_ptr(raw_data, __cpu);
+	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	entry = (struct kretprobe_trace_entry *)raw_data;
+	ent = &entry->ent;
+
+	tracing_generic_entry_update(ent, irq_flags, pc);
+	ent->type = call->id;
+	entry->nargs = tp->nr_args;
+	entry->func = (unsigned long)tp->rp.kp.addr;
+	entry->ret_ip = (unsigned long)ri->ret_addr;
+	for (i = 0; i < tp->nr_args; i++)
+		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
+end:
+	local_irq_restore(irq_flags);
 	return 0;
 }