tracing: Remove NR_CPUS array from trace_iterator

Replace the NR_CPUS array of buffer_iter from the trace_iterator
with an allocated array. This allocates an array sized by the number of
possible CPUs instead of the compile-time maximum NR_CPUS.

The use of NR_CPUS in that array caused allocation failures for
machines that were tight on memory. This did not cause any failures
to the system itself (no crashes), but caused unnecessary failures
for reading the trace files.

Added a helper function called 'trace_buffer_iter()' that returns
the buffer_iter item or NULL if it is not defined or the array was
not allocated. Some routines do not require the array
(tracing_open_pipe() for one).

Reported-by: Dave Jones <davej@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
Steven Rostedt 2012-06-27 20:46:14 -04:00 committed by Steven Rostedt
parent b102f1d0f1
commit 6d158a813e
4 changed files with 28 additions and 11 deletions

View File

@ -65,7 +65,7 @@ struct trace_iterator {
void *private; void *private;
int cpu_file; int cpu_file;
struct mutex mutex; struct mutex mutex;
struct ring_buffer_iter *buffer_iter[NR_CPUS]; struct ring_buffer_iter **buffer_iter;
unsigned long iter_flags; unsigned long iter_flags;
/* trace_seq for __print_flags() and __print_symbolic() etc. */ /* trace_seq for __print_flags() and __print_symbolic() etc. */

View File

@ -1710,9 +1710,11 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter) static void trace_iterator_increment(struct trace_iterator *iter)
{ {
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
iter->idx++; iter->idx++;
if (iter->buffer_iter[iter->cpu]) if (buf_iter)
ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); ring_buffer_read(buf_iter, NULL);
} }
static struct trace_entry * static struct trace_entry *
@ -1720,7 +1722,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
unsigned long *lost_events) unsigned long *lost_events)
{ {
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
if (buf_iter) if (buf_iter)
event = ring_buffer_iter_peek(buf_iter, ts); event = ring_buffer_iter_peek(buf_iter, ts);
@ -1858,10 +1860,10 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
tr->data[cpu]->skipped_entries = 0; tr->data[cpu]->skipped_entries = 0;
if (!iter->buffer_iter[cpu]) buf_iter = trace_buffer_iter(iter, cpu);
if (!buf_iter)
return; return;
buf_iter = iter->buffer_iter[cpu];
ring_buffer_iter_reset(buf_iter); ring_buffer_iter_reset(buf_iter);
/* /*
@ -2207,13 +2209,15 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
int trace_empty(struct trace_iterator *iter) int trace_empty(struct trace_iterator *iter)
{ {
struct ring_buffer_iter *buf_iter;
int cpu; int cpu;
/* If we are looking at one CPU buffer, only check that one */ /* If we are looking at one CPU buffer, only check that one */
if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
cpu = iter->cpu_file; cpu = iter->cpu_file;
if (iter->buffer_iter[cpu]) { buf_iter = trace_buffer_iter(iter, cpu);
if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) if (buf_iter) {
if (!ring_buffer_iter_empty(buf_iter))
return 0; return 0;
} else { } else {
if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@ -2223,8 +2227,9 @@ int trace_empty(struct trace_iterator *iter)
} }
for_each_tracing_cpu(cpu) { for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu]) { buf_iter = trace_buffer_iter(iter, cpu);
if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) if (buf_iter) {
if (!ring_buffer_iter_empty(buf_iter))
return 0; return 0;
} else { } else {
if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@ -2383,6 +2388,8 @@ __tracing_open(struct inode *inode, struct file *file)
if (!iter) if (!iter)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
GFP_KERNEL);
/* /*
* We make a copy of the current tracer to avoid concurrent * We make a copy of the current tracer to avoid concurrent
* changes on it while we are reading. * changes on it while we are reading.
@ -2443,6 +2450,7 @@ __tracing_open(struct inode *inode, struct file *file)
fail: fail:
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
kfree(iter->trace); kfree(iter->trace);
kfree(iter->buffer_iter);
seq_release_private(inode, file); seq_release_private(inode, file);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
@ -2483,6 +2491,7 @@ static int tracing_release(struct inode *inode, struct file *file)
mutex_destroy(&iter->mutex); mutex_destroy(&iter->mutex);
free_cpumask_var(iter->started); free_cpumask_var(iter->started);
kfree(iter->trace); kfree(iter->trace);
kfree(iter->buffer_iter);
seq_release_private(inode, file); seq_release_private(inode, file);
return 0; return 0;
} }

View File

@ -317,6 +317,14 @@ struct tracer {
#define TRACE_PIPE_ALL_CPU -1 #define TRACE_PIPE_ALL_CPU -1
/*
 * trace_buffer_iter - return the ring buffer iterator for @cpu, or NULL.
 *
 * iter->buffer_iter may itself be NULL: some open paths (e.g.
 * tracing_open_pipe()) never allocate the per-CPU iterator array, and
 * __tracing_open()'s kzalloc() of the array can fail.  Callers therefore
 * must not index buffer_iter directly; this helper centralizes both the
 * array-present and slot-present checks.
 *
 * NOTE(review): the array is allocated with num_possible_cpus() entries,
 * but CPU ids can be sparse, so a valid @cpu could in principle exceed
 * num_possible_cpus() - 1 — confirm whether nr_cpu_ids should be used
 * for the allocation instead.
 */
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}
int tracer_init(struct tracer *t, struct trace_array *tr); int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void); int tracing_is_enabled(void);
void trace_wake_up(void); void trace_wake_up(void);

View File

@ -538,7 +538,7 @@ get_return_for_leaf(struct trace_iterator *iter,
next = &data->ret; next = &data->ret;
} else { } else {
ring_iter = iter->buffer_iter[iter->cpu]; ring_iter = trace_buffer_iter(iter, iter->cpu);
/* First peek to compare current entry and the next one */ /* First peek to compare current entry and the next one */
if (ring_iter) if (ring_iter)