workqueue: temporarily remove workqueue tracing

Strip tracing code from workqueue and remove workqueue tracing. This is a
temporary measure until concurrency managed workqueue is complete.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>

commit 6416669975
parent a62428c0ae
--- a/include/trace/events/workqueue.h
+++ /dev/null
@@ -1,92 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM workqueue
-
-#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_WORKQUEUE_H
-
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/tracepoint.h>
-
-DECLARE_EVENT_CLASS(workqueue,
-
-	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
-
-	TP_ARGS(wq_thread, work),
-
-	TP_STRUCT__entry(
-		__array(char, thread_comm, TASK_COMM_LEN)
-		__field(pid_t, thread_pid)
-		__field(work_func_t, func)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
-		__entry->thread_pid = wq_thread->pid;
-		__entry->func = work->func;
-	),
-
-	TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
-		__entry->thread_pid, __entry->func)
-);
-
-DEFINE_EVENT(workqueue, workqueue_insertion,
-
-	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
-
-	TP_ARGS(wq_thread, work)
-);
-
-DEFINE_EVENT(workqueue, workqueue_execution,
-
-	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
-
-	TP_ARGS(wq_thread, work)
-);
-
-/* Trace the creation of one workqueue thread on a cpu */
-TRACE_EVENT(workqueue_creation,
-
-	TP_PROTO(struct task_struct *wq_thread, int cpu),
-
-	TP_ARGS(wq_thread, cpu),
-
-	TP_STRUCT__entry(
-		__array(char, thread_comm, TASK_COMM_LEN)
-		__field(pid_t, thread_pid)
-		__field(int, cpu)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
-		__entry->thread_pid = wq_thread->pid;
-		__entry->cpu = cpu;
-	),
-
-	TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
-		__entry->thread_pid, __entry->cpu)
-);
-
-TRACE_EVENT(workqueue_destruction,
-
-	TP_PROTO(struct task_struct *wq_thread),
-
-	TP_ARGS(wq_thread),
-
-	TP_STRUCT__entry(
-		__array(char, thread_comm, TASK_COMM_LEN)
-		__field(pid_t, thread_pid)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
-		__entry->thread_pid = wq_thread->pid;
-	),
-
-	TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
-);
-
-#endif /* _TRACE_WORKQUEUE_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
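For context (not part of the patch): in the header deleted above, DECLARE_EVENT_CLASS(workqueue, ...) defines one record layout, fast-assign and print format, and each DEFINE_EVENT(workqueue, <name>, ...) stamps out an event that reuses that class, generating a trace_<name>() helper which kernel/workqueue.c calls. The following is only a rough standalone userspace analogy of that class/instance sharing, with hypothetical names (wq_entry, wq_class_emit, plain-function "events"); it is not the kernel TRACE_EVENT machinery.

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16

/* ~ TP_STRUCT__entry of the "workqueue" event class */
struct wq_entry {
	char thread_comm[TASK_COMM_LEN];
	int thread_pid;
	const char *func;               /* stand-in for work_func_t */
};

/* ~ the event class: one fast-assign and one print format shared by events */
static void wq_class_emit(const char *event, const char *comm, int pid,
			  const char *func)
{
	struct wq_entry e;

	strncpy(e.thread_comm, comm, TASK_COMM_LEN - 1);
	e.thread_comm[TASK_COMM_LEN - 1] = '\0';
	e.thread_pid = pid;
	e.func = func;
	printf("%s: thread=%s:%d func=%s\n",
	       event, e.thread_comm, e.thread_pid, e.func);
}

/* ~ DEFINE_EVENT(workqueue, workqueue_insertion, ...) */
static void trace_workqueue_insertion(const char *comm, int pid, const char *func)
{
	wq_class_emit("workqueue_insertion", comm, pid, func);
}

/* ~ DEFINE_EVENT(workqueue, workqueue_execution, ...) */
static void trace_workqueue_execution(const char *comm, int pid, const char *func)
{
	wq_class_emit("workqueue_execution", comm, pid, func);
}

int main(void)
{
	trace_workqueue_insertion("events/0", 10, "vmstat_update");
	trace_workqueue_execution("events/0", 10, "vmstat_update");
	return 0;
}

Because both events share one class, removing the class (as this patch does) removes both insertion and execution tracing at once.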
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -391,17 +391,6 @@ config KMEMTRACE
 
 	  If unsure, say N.
 
-config WORKQUEUE_TRACER
-	bool "Trace workqueues"
-	select GENERIC_TRACER
-	help
-	  The workqueue tracer provides some statistical information
-	  about each cpu workqueue thread such as the number of the
-	  works inserted and executed since their creation. It can help
-	  to evaluate the amount of work each of them has to perform.
-	  For example it can help a developer to decide whether he should
-	  choose a per-cpu workqueue instead of a singlethreaded one.
-
 config BLK_DEV_IO_TRACE
 	bool "Support for tracing block IO actions"
 	depends on SYSFS
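For context (not part of the patch): the help text above refers to per-cpu insertion/execution counters the tracer collected. As a hedged usage sketch only, assuming the old stat tracer exposed them under debugfs at something like trace_stat/workqueues (that path is an assumption here, not confirmed by this diff), a trivial reader could simply dump whatever stat file it is pointed at:

#include <stdio.h>

int main(int argc, char **argv)
{
	/* default path is an assumption about where debugfs exposed the stats */
	const char *path = argc > 1 ? argv[1] :
		"/sys/kernel/debug/tracing/trace_stat/workqueues";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);   /* expected on kernels without WORKQUEUE_TRACER */
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}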
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,8 +33,6 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/workqueue.h>
 
 /*
  * Structure fields follow one of the following exclusion rules.
@@ -243,10 +241,10 @@ static inline void clear_wq_data(struct work_struct *work)
 	atomic_long_set(&work->data, work_static(work));
 }
 
-static inline
-struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
+static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 {
-	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
+	return (void *)(atomic_long_read(&work->data) &
+			WORK_STRUCT_WQ_DATA_MASK);
 }
 
 /**
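For context (not part of the patch): the reflowed get_wq_data() above recovers the cpu_workqueue_struct pointer from work->data, which packs the pointer together with low-order flag bits, by masking with WORK_STRUCT_WQ_DATA_MASK. Below is a minimal standalone C sketch of that pack/unpack idea; the names (pack, unpack, struct cwq, FLAG_MASK, DATA_MASK) and the 2-bit flag mask are made up purely for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_MASK 0x3UL                /* low bits reserved for flag state  */
#define DATA_MASK (~FLAG_MASK)         /* plays the role of the data mask   */

struct cwq { int cpu; };               /* stand-in for cpu_workqueue_struct */

static unsigned long pack(struct cwq *cwq, unsigned long flags)
{
	/* works because the pointer is aligned, so its low bits are free */
	assert(((uintptr_t)cwq & FLAG_MASK) == 0);
	return (uintptr_t)cwq | (flags & FLAG_MASK);
}

static struct cwq *unpack(unsigned long data)
{
	return (struct cwq *)(data & DATA_MASK);  /* same masking as get_wq_data() */
}

int main(void)
{
	static struct cwq my_cwq = { .cpu = 3 };
	unsigned long data = pack(&my_cwq, 0x1UL);  /* pointer plus one flag bit */

	printf("cpu=%d flags=%lu\n", unpack(data)->cpu, data & FLAG_MASK);
	return 0;
}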
@@ -265,8 +263,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 			struct work_struct *work, struct list_head *head,
 			unsigned int extra_flags)
 {
-	trace_workqueue_insertion(cwq->thread, work);
-
 	/* we own @work, set data and link */
 	set_wq_data(work, cwq, extra_flags);
 
@@ -431,7 +427,6 @@ static void process_one_work(struct cpu_workqueue_struct *cwq,
 	struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
 	/* claim and process */
-	trace_workqueue_execution(cwq->thread, work);
 	debug_work_deactivate(work);
 	cwq->current_work = work;
 	list_del_init(&work->entry);
@@ -1017,8 +1012,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 		return PTR_ERR(p);
 	cwq->thread = p;
 
-	trace_workqueue_creation(cwq->thread, cpu);
-
 	return 0;
 }
 
@@ -1123,7 +1116,6 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	 * checks list_empty(), and a "normal" queue_work() can't use
 	 * a dead CPU.
 	 */
-	trace_workqueue_destruction(cwq->thread);
 	kthread_stop(cwq->thread);
 	cwq->thread = NULL;
 }