ftrace: add logic to record overruns
This patch sets up the infrastructure to record overruns of the tracing buffer.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 53d0aa7730
parent 72b59d67f8
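The change below teaches the per-CPU trace buffer to count how many entries it has had to overwrite instead of losing them silently. As a rough illustration of the idea only (a toy stand-alone ring buffer, not the ftrace code itself):

#include <stddef.h>

/*
 * Toy fixed-size ring buffer, illustration only.  When the producer
 * index catches up with the consumer index, the oldest entry is
 * overwritten and the loss is recorded in 'overrun' rather than
 * blocking the writer.
 */
#define TOY_RING_SIZE 64

struct toy_ring {
	int entries[TOY_RING_SIZE];
	size_t head;			/* next slot the producer writes */
	size_t tail;			/* next slot the consumer reads  */
	unsigned long overrun;		/* entries lost to overwrites    */
};

static void toy_ring_write(struct toy_ring *r, int val)
{
	size_t next = (r->head + 1) % TOY_RING_SIZE;

	if (next == r->tail) {
		/* buffer full: drop the oldest entry and count the loss */
		r->tail = (r->tail + 1) % TOY_RING_SIZE;
		r->overrun++;
	}
	r->entries[r->head] = val;
	r->head = next;
}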
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -609,6 +609,7 @@ void unregister_tracer(struct tracer *type)
 void tracing_reset(struct trace_array_cpu *data)
 {
 	data->trace_idx = 0;
+	data->overrun = 0;
 	data->trace_head = data->trace_tail = head_page(data);
 	data->trace_head_idx = 0;
 	data->trace_tail_idx = 0;
@@ -750,6 +751,7 @@ tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
 	if (data->trace_head == data->trace_tail &&
 	    idx_next == data->trace_tail_idx) {
 		/* overrun */
+		data->overrun++;
 		data->trace_tail_idx++;
 		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
 			data->trace_tail =
@@ -2353,8 +2355,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 {
 	struct trace_iterator *iter = filp->private_data;
 	struct trace_array_cpu *data;
-	struct trace_array *tr = iter->tr;
-	struct tracer *tracer = iter->trace;
 	static cpumask_t mask;
 	static int start;
 	unsigned long flags;
@@ -2433,10 +2433,11 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 	if (cnt >= PAGE_SIZE)
 		cnt = PAGE_SIZE - 1;
 
-	memset(iter, 0, sizeof(*iter));
-	iter->tr = tr;
-	iter->trace = tracer;
+	/* reset all but tr, trace, and overruns */
 	iter->pos = -1;
+	memset(&iter->seq, 0,
+	       sizeof(struct trace_iterator) -
+	       offsetof(struct trace_iterator, seq));
 
 	/*
 	 * We need to stop all tracing on all CPUS to read the
@@ -2465,6 +2466,11 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 	for_each_cpu_mask(cpu, mask) {
 		data = iter->tr->data[cpu];
 		__raw_spin_lock(&data->lock);
+
+		if (data->overrun > iter->last_overrun[cpu])
+			iter->overrun[cpu] +=
+				data->overrun - iter->last_overrun[cpu];
+		iter->last_overrun[cpu] = data->overrun;
 	}
 
 	while (find_next_entry_inc(iter) != NULL) {
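The read path folds any producer-side overruns that happened since the previous read into the iterator's per-CPU totals. Distilled into a stand-alone helper (hypothetical name and signature, for illustration only), the accumulation step amounts to:

/*
 * Add only the overruns that are new since the last read,
 * then remember the producer's current count.
 */
static void accumulate_overrun(long *total, long *last_seen, long producer_overrun)
{
	if (producer_overrun > *last_seen)
		*total += producer_overrun - *last_seen;
	*last_seen = producer_overrun;
}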
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -97,6 +97,7 @@ struct trace_array_cpu {
 	void *trace_head;	/* producer */
 	void *trace_tail;	/* consumer */
 	unsigned long trace_idx;
+	unsigned long overrun;
 	unsigned long saved_latency;
 	unsigned long critical_start;
 	unsigned long critical_end;
@@ -157,10 +158,13 @@ struct trace_seq {
  * results to users and which routines might sleep, etc:
  */
 struct trace_iterator {
-	struct trace_seq seq;
 	struct trace_array *tr;
 	struct tracer *trace;
+	long last_overrun[NR_CPUS];
+	long overrun[NR_CPUS];
 
+	/* The below is zeroed out in pipe_read */
+	struct trace_seq seq;
 	struct trace_entry *ent;
 	int cpu;
 
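Moving struct trace_seq seq below the preserved fields is what lets tracing_read_pipe() reset the iterator with a single memset() starting at offsetof(..., seq): everything declared before seq (tr, trace and the overrun bookkeeping) survives, everything from seq onward is wiped. A minimal stand-alone sketch of that offsetof trick (toy struct, not the kernel one):

#include <stddef.h>
#include <string.h>

/*
 * Toy struct mirroring the layout idea: state that must survive a read
 * comes first, everything from 'seq' onward is per-read scratch.
 */
struct toy_iterator {
	void *tr;		/* preserved across reads */
	long overrun[4];	/* preserved across reads */
	char seq[128];		/* zeroed on every read   */
	int cpu;		/* zeroed on every read   */
};

static void toy_iterator_reset(struct toy_iterator *iter)
{
	/* wipe 'seq' and every field declared after it, leave the rest */
	memset(&iter->seq, 0,
	       sizeof(struct toy_iterator) - offsetof(struct toy_iterator, seq));
}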