tracing: Remove taking of trace_types_lock in pipe files

Taking the global mutex "trace_types_lock" in the trace_pipe files
causes a bottleneck, as most of the pipe files can be read per cpu
and there's no reason to serialize them.

The current_trace variable was given a ref count and it cannot
change while the ref count is non-zero. Opening the trace_pipe
files increments the ref count (and it is decremented on close), so
the lock no longer needs to be taken when accessing the
current_trace variable.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
Steven Rostedt (Red Hat) 2014-12-15 22:31:07 -05:00 committed by Steven Rostedt
parent cf6ab6d914
commit d716ff71dd

View File

@ -4332,17 +4332,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
} }
trace_seq_init(&iter->seq); trace_seq_init(&iter->seq);
iter->trace = tr->current_trace;
/*
* We make a copy of the current tracer to avoid concurrent
* changes on it while we are reading.
*/
iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
if (!iter->trace) {
ret = -ENOMEM;
goto fail;
}
*iter->trace = *tr->current_trace;
if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
ret = -ENOMEM; ret = -ENOMEM;
@ -4399,7 +4389,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
free_cpumask_var(iter->started); free_cpumask_var(iter->started);
mutex_destroy(&iter->mutex); mutex_destroy(&iter->mutex);
kfree(iter->trace);
kfree(iter); kfree(iter);
trace_array_put(tr); trace_array_put(tr);
@ -4432,7 +4421,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
return trace_poll(iter, filp, poll_table); return trace_poll(iter, filp, poll_table);
} }
/* Must be called with trace_types_lock mutex held. */ /* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp) static int tracing_wait_pipe(struct file *filp)
{ {
struct trace_iterator *iter = filp->private_data; struct trace_iterator *iter = filp->private_data;
@ -4477,7 +4466,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos) size_t cnt, loff_t *ppos)
{ {
struct trace_iterator *iter = filp->private_data; struct trace_iterator *iter = filp->private_data;
struct trace_array *tr = iter->tr;
ssize_t sret; ssize_t sret;
/* return any leftover data */ /* return any leftover data */
@ -4487,12 +4475,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
trace_seq_init(&iter->seq); trace_seq_init(&iter->seq);
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
if (unlikely(iter->trace->name != tr->current_trace->name))
*iter->trace = *tr->current_trace;
mutex_unlock(&trace_types_lock);
/* /*
* Avoid more than one consumer on a single file descriptor * Avoid more than one consumer on a single file descriptor
* This is just a matter of traces coherency, the ring buffer itself * This is just a matter of traces coherency, the ring buffer itself
@ -4652,7 +4634,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
.ops = &tracing_pipe_buf_ops, .ops = &tracing_pipe_buf_ops,
.spd_release = tracing_spd_release_pipe, .spd_release = tracing_spd_release_pipe,
}; };
struct trace_array *tr = iter->tr;
ssize_t ret; ssize_t ret;
size_t rem; size_t rem;
unsigned int i; unsigned int i;
@ -4660,12 +4641,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
if (splice_grow_spd(pipe, &spd)) if (splice_grow_spd(pipe, &spd))
return -ENOMEM; return -ENOMEM;
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
if (unlikely(iter->trace->name != tr->current_trace->name))
*iter->trace = *tr->current_trace;
mutex_unlock(&trace_types_lock);
mutex_lock(&iter->mutex); mutex_lock(&iter->mutex);
if (iter->trace->splice_read) { if (iter->trace->splice_read) {
@ -5373,21 +5348,16 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
if (!count) if (!count)
return 0; return 0;
mutex_lock(&trace_types_lock);
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
if (iter->snapshot && iter->tr->current_trace->use_max_tr) { if (iter->snapshot && iter->tr->current_trace->use_max_tr)
size = -EBUSY; return -EBUSY;
goto out_unlock;
}
#endif #endif
if (!info->spare) if (!info->spare)
info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
iter->cpu_file); iter->cpu_file);
size = -ENOMEM;
if (!info->spare) if (!info->spare)
goto out_unlock; return -ENOMEM;
/* Do we have previous read data to read? */ /* Do we have previous read data to read? */
if (info->read < PAGE_SIZE) if (info->read < PAGE_SIZE)
@ -5403,21 +5373,16 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
if (ret < 0) { if (ret < 0) {
if (trace_empty(iter)) { if (trace_empty(iter)) {
if ((filp->f_flags & O_NONBLOCK)) { if ((filp->f_flags & O_NONBLOCK))
size = -EAGAIN; return -EAGAIN;
goto out_unlock;
}
mutex_unlock(&trace_types_lock);
ret = wait_on_pipe(iter, false); ret = wait_on_pipe(iter, false);
mutex_lock(&trace_types_lock); if (ret)
if (ret) { return ret;
size = ret;
goto out_unlock;
}
goto again; goto again;
} }
size = 0; return 0;
goto out_unlock;
} }
info->read = 0; info->read = 0;
@ -5427,18 +5392,14 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
size = count; size = count;
ret = copy_to_user(ubuf, info->spare + info->read, size); ret = copy_to_user(ubuf, info->spare + info->read, size);
if (ret == size) { if (ret == size)
size = -EFAULT; return -EFAULT;
goto out_unlock;
}
size -= ret; size -= ret;
*ppos += size; *ppos += size;
info->read += size; info->read += size;
out_unlock:
mutex_unlock(&trace_types_lock);
return size; return size;
} }
@ -5536,30 +5497,20 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
int entries, size, i; int entries, size, i;
ssize_t ret = 0; ssize_t ret = 0;
mutex_lock(&trace_types_lock);
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
if (iter->snapshot && iter->tr->current_trace->use_max_tr) { if (iter->snapshot && iter->tr->current_trace->use_max_tr)
ret = -EBUSY; return -EBUSY;
goto out;
}
#endif #endif
if (splice_grow_spd(pipe, &spd)) { if (splice_grow_spd(pipe, &spd))
ret = -ENOMEM; return -ENOMEM;
goto out;
}
if (*ppos & (PAGE_SIZE - 1)) { if (*ppos & (PAGE_SIZE - 1))
ret = -EINVAL; return -EINVAL;
goto out;
}
if (len & (PAGE_SIZE - 1)) { if (len & (PAGE_SIZE - 1)) {
if (len < PAGE_SIZE) { if (len < PAGE_SIZE)
ret = -EINVAL; return -EINVAL;
goto out;
}
len &= PAGE_MASK; len &= PAGE_MASK;
} }
@ -5620,25 +5571,20 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
/* did we read anything? */ /* did we read anything? */
if (!spd.nr_pages) { if (!spd.nr_pages) {
if (ret) if (ret)
goto out; return ret;
if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
return -EAGAIN;
if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
ret = -EAGAIN;
goto out;
}
mutex_unlock(&trace_types_lock);
ret = wait_on_pipe(iter, true); ret = wait_on_pipe(iter, true);
mutex_lock(&trace_types_lock);
if (ret) if (ret)
goto out; return ret;
goto again; goto again;
} }
ret = splice_to_pipe(pipe, &spd); ret = splice_to_pipe(pipe, &spd);
splice_shrink_spd(&spd); splice_shrink_spd(&spd);
out:
mutex_unlock(&trace_types_lock);
return ret; return ret;
} }