Merge branches 'tracing/ftrace', 'tracing/markers', 'tracing/mmiotrace', 'tracing/nmisafe', 'tracing/tracepoints' and 'tracing/urgent' into tracing/core
This commit is contained in:
commit 7a895f53cd
@@ -1,6 +1,11 @@
 #ifndef _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE
 
+#ifndef __ASSEMBLY__
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
@@ -1,6 +1,11 @@
 #ifndef _ASM_POWERPC_FTRACE
 #define _ASM_POWERPC_FTRACE
 
+#ifndef __ASSEMBLY__
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(_mcount))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
@@ -1,6 +1,11 @@
 #ifndef __ASM_SH_FTRACE_H
 #define __ASM_SH_FTRACE_H
 
+#ifndef __ASSEMBLY__
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 #endif
@@ -1,6 +1,11 @@
 #ifndef _ASM_SPARC64_FTRACE
 #define _ASM_SPARC64_FTRACE
 
+#ifndef __ASSEMBLY__
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
 #ifdef CONFIG_MCOUNT
 #define MCOUNT_ADDR		((long)(_mcount))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
@@ -186,14 +186,10 @@ config IOMMU_LEAK
 	  Add a simple leak tracer to the IOMMU code. This is useful when you
 	  are debugging a buggy device driver that leaks IOMMU mappings.
 
-config MMIOTRACE_HOOKS
-	bool
-
 config MMIOTRACE
 	bool "Memory mapped IO tracing"
 	depends on DEBUG_KERNEL && PCI
 	select TRACING
-	select MMIOTRACE_HOOKS
 	help
 	  Mmiotrace traces Memory Mapped I/O access and is meant for
 	  debugging and reverse engineering. It is called from the ioremap
@@ -17,6 +17,21 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 	 */
 	return addr - 1;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_nmi_enter(void);
+extern void ftrace_nmi_exit(void);
+#else
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
 #endif /* __ASSEMBLY__ */
 
+#else /* CONFIG_FUNCTION_TRACER */
+
+#ifndef __ASSEMBLY__
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
 #endif /* CONFIG_FUNCTION_TRACER */
@@ -56,6 +56,133 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return calc.code;
 }
 
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Set a flag that says we are modifying code
+ * 3) Wait for any running NMIs to finish.
+ * 4) Write the code
+ * 5) clear the flag.
+ * 6) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+
+static atomic_t in_nmi = ATOMIC_INIT(0);
+static int mod_code_status;		/* holds return value of text write */
+static int mod_code_write;		/* set when NMI should do the write */
+static void *mod_code_ip;		/* holds the IP to write to */
+static void *mod_code_newcode;		/* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+	int r;
+
+	r = snprintf(buf, size, "%u %u",
+		     nmi_wait_count,
+		     atomic_read(&nmi_update_count));
+	return r;
+}
+
+static void ftrace_mod_code(void)
+{
+	/*
+	 * Yes, more than one CPU process can be writing to mod_code_status.
+	 *    (and the code itself)
+	 * But if one were to fail, then they all should, and if one were
+	 * to succeed, then they all should.
+	 */
+	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+					     MCOUNT_INSN_SIZE);
+
+}
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+	/* Must have in_nmi seen before reading write flag */
+	smp_mb();
+	if (mod_code_write) {
+		ftrace_mod_code();
+		atomic_inc(&nmi_update_count);
+	}
+}
+
+void ftrace_nmi_exit(void)
+{
+	/* Finish all executions before clearing in_nmi */
+	smp_wmb();
+	atomic_dec(&in_nmi);
+}
+
+static void wait_for_nmi(void)
+{
+	int waited = 0;
+
+	while (atomic_read(&in_nmi)) {
+		waited = 1;
+		cpu_relax();
+	}
+
+	if (waited)
+		nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+	mod_code_ip = (void *)ip;
+	mod_code_newcode = new_code;
+
+	/* The buffers need to be visible before we let NMIs write them */
+	smp_wmb();
+
+	mod_code_write = 1;
+
+	/* Make sure write bit is visible before we wait on NMIs */
+	smp_mb();
+
+	wait_for_nmi();
+
+	/* Make sure all running NMIs have finished before we write the code */
+	smp_mb();
+
+	ftrace_mod_code();
+
+	/* Make sure the write happens before clearing the bit */
+	smp_wmb();
+
+	mod_code_write = 0;
+
+	/* make sure NMIs see the cleared bit */
+	smp_mb();
+
+	wait_for_nmi();
+
+	return mod_code_status;
+}
+
+
 int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
@@ -81,7 +208,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		return -EINVAL;
 
 	/* replace the text with the new text */
-	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+	if (do_ftrace_mod_code(ip, new_code))
 		return -EPERM;
 
 	sync_core();
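The protocol in the comment block above is easiest to see stripped of kernel details. What follows is a minimal userspace C sketch of the same handshake, an illustration rather than the kernel implementation: the names mirror the mod_code_* variables, C11 sequentially consistent atomics stand in for the explicit smp_mb()/smp_wmb() pairs, and the single-threaded main() only exercises the writer path (in the kernel the interesting case is an NMI arriving between the two wait_for_nmi() calls, which then performs the same benign write itself).

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

static atomic_int in_nmi;
static atomic_int mod_code_write;	/* set when the "NMI" should write */
static char *mod_code_ip;		/* destination buffer */
static const char *mod_code_newcode;	/* replacement text */

static void mod_code(void)
{
	/* Both sides may write; the content is identical, so the race is benign. */
	memcpy(mod_code_ip, mod_code_newcode, 4);
}

static void nmi_enter(void)
{
	atomic_fetch_add(&in_nmi, 1);	/* publish in_nmi before reading the flag */
	if (atomic_load(&mod_code_write))
		mod_code();
}

static void nmi_exit(void)
{
	atomic_fetch_sub(&in_nmi, 1);	/* all writes finished before leaving */
}

static void wait_for_nmi(void)
{
	while (atomic_load(&in_nmi))
		;			/* cpu_relax() in the kernel */
}

static void do_mod_code(char *ip, const char *newcode)
{
	mod_code_ip = ip;			/* 1) fill the buffers */
	mod_code_newcode = newcode;
	atomic_store(&mod_code_write, 1);	/* 2) raise the flag */
	wait_for_nmi();				/* 3) drain NMIs that missed it */
	mod_code();				/* 4) write */
	atomic_store(&mod_code_write, 0);	/* 5) clear the flag */
	wait_for_nmi();				/* 6) drain before reusing buffers */
}

int main(void)
{
	char text[5] = "AAAA";

	do_mod_code(text, "BBBB");	/* no concurrent NMI in this toy run */
	nmi_enter();			/* a "late" NMI sees the flag cleared */
	nmi_exit();
	puts(text);			/* prints BBBB */
	return 0;
}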
@@ -8,9 +8,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)		+= highmem_32.o
 
-obj-$(CONFIG_MMIOTRACE_HOOKS)	+= kmmio.o
 obj-$(CONFIG_MMIOTRACE)		+= mmiotrace.o
-mmiotrace-y			:= pf_in.o mmio-mod.o
+mmiotrace-y			:= kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)	+= testmmiotrace.o
 
 obj-$(CONFIG_NUMA)		+= numa_$(BITS).o
@@ -53,7 +53,7 @@
 
 static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
 {
-#ifdef CONFIG_MMIOTRACE_HOOKS
+#ifdef CONFIG_MMIOTRACE
 	if (unlikely(is_kmmio_active()))
 		if (kmmio_handler(regs, addr) == 1)
 			return -1;
@@ -74,6 +74,9 @@ extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
 
+/* May be defined in arch */
+extern int ftrace_arch_read_dyn_info(char *buf, int size);
+
 /**
  * ftrace_modify_code - modify code segment
  * @ip: the address of the code segment
@@ -5,6 +5,7 @@
 #include <linux/smp_lock.h>
 #include <linux/lockdep.h>
 #include <asm/hardirq.h>
+#include <asm/ftrace.h>
 #include <asm/system.h>
 
 /*
@@ -161,7 +162,17 @@ extern void irq_enter(void);
  */
 extern void irq_exit(void);
 
-#define nmi_enter()		do { lockdep_off(); __irq_enter(); } while (0)
-#define nmi_exit()		do { __irq_exit(); lockdep_on(); } while (0)
+#define nmi_enter()				\
+	do {					\
+		ftrace_nmi_enter();		\
+		lockdep_off();			\
+		__irq_enter();			\
+	} while (0)
+#define nmi_exit()				\
+	do {					\
+		__irq_exit();			\
+		lockdep_on();			\
+		ftrace_nmi_exit();		\
+	} while (0)
 
 #endif /* LINUX_HARDIRQ_H */
@@ -136,8 +136,6 @@ extern marker_probe_func __mark_empty_function;
 
 extern void marker_probe_cb(const struct marker *mdata,
 	void *call_private, ...);
-extern void marker_probe_cb_noarg(const struct marker *mdata,
-	void *call_private, ...);
 
 /*
  * Connect a probe to a marker.
@@ -112,6 +112,10 @@ extern int tracepoint_probe_register(const char *name, void *probe);
  */
 extern int tracepoint_probe_unregister(const char *name, void *probe);
 
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern void tracepoint_probe_update_all(void);
+
 struct tracepoint_iter {
 	struct module *module;
 	struct tracepoint *tracepoint;
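The _noupdate variants batch registration: each call updates the probe tables under the mutex, while the tracepoint-site update and the RCU-deferred release are paid once by a single tracepoint_probe_update_all() call. A hedged sketch of how a caller with several probes might use this; the tracepoint names and my_probe_* handlers are hypothetical, not part of this patch:

#include <linux/kernel.h>
#include <linux/tracepoint.h>

extern void my_probe_a(void *data);	/* hypothetical handlers */
extern void my_probe_b(void *data);

static struct {
	const char *name;
	void *probe;
} batch[] = {
	{ "irq_entry", (void *)my_probe_a },	/* hypothetical names */
	{ "irq_exit",  (void *)my_probe_b },
};

static int __init batch_register(void)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(batch); i++) {
		ret = tracepoint_probe_register_noupdate(batch[i].name,
							 batch[i].probe);
		if (ret)
			return ret;
	}
	tracepoint_probe_update_all();	/* one update instead of one per probe */
	return 0;
}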
@@ -43,6 +43,7 @@ static DEFINE_MUTEX(markers_mutex);
  */
 #define MARKER_HASH_BITS 6
 #define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
+static struct hlist_head marker_table[MARKER_TABLE_SIZE];
 
 /*
  * Note about RCU :
@@ -64,11 +65,10 @@ struct marker_entry {
 	void *oldptr;
 	int rcu_pending;
 	unsigned char ptype:1;
+	unsigned char format_allocated:1;
 	char name[0];	/* Contains name'\0'format'\0' */
 };
 
-static struct hlist_head marker_table[MARKER_TABLE_SIZE];
-
 /**
  * __mark_empty_function - Empty probe callback
  * @probe_private: probe private data
@@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(marker_probe_cb);
  *
  * Should be connected to markers "MARK_NOARGS".
  */
-void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
+static void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
 {
 	va_list args;	/* not initialized */
 	char ptype;
@@ -197,7 +197,6 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
 	}
 	rcu_read_unlock_sched();
 }
-EXPORT_SYMBOL_GPL(marker_probe_cb_noarg);
 
 static void free_old_closure(struct rcu_head *head)
 {
@@ -416,6 +415,7 @@ static struct marker_entry *add_marker(const char *name, const char *format)
 	e->single.probe_private = NULL;
 	e->multi = NULL;
 	e->ptype = 0;
+	e->format_allocated = 0;
 	e->refcount = 0;
 	e->rcu_pending = 0;
 	hlist_add_head(&e->hlist, head);
@@ -447,6 +447,8 @@ static int remove_marker(const char *name)
 	if (e->single.func != __mark_empty_function)
 		return -EBUSY;
 	hlist_del(&e->hlist);
+	if (e->format_allocated)
+		kfree(e->format);
 	/* Make sure the call_rcu has been executed */
 	if (e->rcu_pending)
 		rcu_barrier_sched();
@@ -457,57 +459,34 @@ static int remove_marker(const char *name)
 /*
  * Set the mark_entry format to the format found in the element.
  */
-static int marker_set_format(struct marker_entry **entry, const char *format)
+static int marker_set_format(struct marker_entry *entry, const char *format)
 {
-	struct marker_entry *e;
-	size_t name_len = strlen((*entry)->name) + 1;
-	size_t format_len = strlen(format) + 1;
-
-	e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
-			GFP_KERNEL);
-	if (!e)
+	entry->format = kstrdup(format, GFP_KERNEL);
+	if (!entry->format)
 		return -ENOMEM;
-	memcpy(&e->name[0], (*entry)->name, name_len);
-	e->format = &e->name[name_len];
-	memcpy(e->format, format, format_len);
-	if (strcmp(e->format, MARK_NOARGS) == 0)
-		e->call = marker_probe_cb_noarg;
-	else
-		e->call = marker_probe_cb;
-	e->single = (*entry)->single;
-	e->multi = (*entry)->multi;
-	e->ptype = (*entry)->ptype;
-	e->refcount = (*entry)->refcount;
-	e->rcu_pending = 0;
-	hlist_add_before(&e->hlist, &(*entry)->hlist);
-	hlist_del(&(*entry)->hlist);
-	/* Make sure the call_rcu has been executed */
-	if ((*entry)->rcu_pending)
-		rcu_barrier_sched();
-	kfree(*entry);
-	*entry = e;
+	entry->format_allocated = 1;
 
 	trace_mark(core_marker_format, "name %s format %s",
-		   e->name, e->format);
+		   entry->name, entry->format);
 	return 0;
 }
 
 /*
  * Sets the probe callback corresponding to one marker.
  */
-static int set_marker(struct marker_entry **entry, struct marker *elem,
+static int set_marker(struct marker_entry *entry, struct marker *elem,
 		int active)
 {
 	int ret;
-	WARN_ON(strcmp((*entry)->name, elem->name) != 0);
+	WARN_ON(strcmp(entry->name, elem->name) != 0);
 
-	if ((*entry)->format) {
-		if (strcmp((*entry)->format, elem->format) != 0) {
+	if (entry->format) {
+		if (strcmp(entry->format, elem->format) != 0) {
 			printk(KERN_NOTICE
 				"Format mismatch for probe %s "
 				"(%s), marker (%s)\n",
-				(*entry)->name,
-				(*entry)->format,
+				entry->name,
+				entry->format,
 				elem->format);
 			return -EPERM;
 		}
@@ -523,34 +502,33 @@ static int set_marker(struct marker_entry **entry, struct marker *elem,
 	 * pass from a "safe" callback (with argument) to an "unsafe"
 	 * callback (does not set arguments).
 	 */
-	elem->call = (*entry)->call;
+	elem->call = entry->call;
 	/*
 	 * Sanity check :
 	 * We only update the single probe private data when the ptr is
 	 * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
 	 */
 	WARN_ON(elem->single.func != __mark_empty_function
-		&& elem->single.probe_private
-		!= (*entry)->single.probe_private &&
-		!elem->ptype);
-	elem->single.probe_private = (*entry)->single.probe_private;
+		&& elem->single.probe_private != entry->single.probe_private
+		&& !elem->ptype);
+	elem->single.probe_private = entry->single.probe_private;
 	/*
 	 * Make sure the private data is valid when we update the
 	 * single probe ptr.
 	 */
 	smp_wmb();
-	elem->single.func = (*entry)->single.func;
+	elem->single.func = entry->single.func;
 	/*
 	 * We also make sure that the new probe callbacks array is consistent
 	 * before setting a pointer to it.
 	 */
-	rcu_assign_pointer(elem->multi, (*entry)->multi);
+	rcu_assign_pointer(elem->multi, entry->multi);
 	/*
 	 * Update the function or multi probe array pointer before setting the
 	 * ptype.
 	 */
 	smp_wmb();
-	elem->ptype = (*entry)->ptype;
+	elem->ptype = entry->ptype;
 	elem->state = active;
 
 	return 0;
@@ -594,8 +572,7 @@ void marker_update_probe_range(struct marker *begin,
 	for (iter = begin; iter < end; iter++) {
 		mark_entry = get_marker(iter->name);
 		if (mark_entry) {
-			set_marker(&mark_entry, iter,
-					!!mark_entry->refcount);
+			set_marker(mark_entry, iter, !!mark_entry->refcount);
 			/*
 			 * ignore error, continue
 			 */
@@ -657,7 +634,7 @@ int marker_probe_register(const char *name, const char *format,
 		ret = PTR_ERR(entry);
 	} else if (format) {
 		if (!entry->format)
-			ret = marker_set_format(&entry, format);
+			ret = marker_set_format(entry, format);
 		else if (strcmp(entry->format, format))
 			ret = -EPERM;
 	}
@@ -848,8 +825,6 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe,
 			if (!e->ptype) {
 				if (num == 0 && e->single.func == probe)
 					return e->single.probe_private;
-				else
-					break;
 			} else {
 				struct marker_probe_closure *closure;
 				int match = 0;
@@ -861,6 +836,7 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe,
 						return closure[i].probe_private;
 				}
 			}
+			break;
 		}
 	}
 	return ERR_PTR(-ENOENT);
@@ -25,7 +25,7 @@ config TRACING
 	bool
 	select DEBUG_FS
 	select RING_BUFFER
-	select STACKTRACE
+	select STACKTRACE if STACKTRACE_SUPPORT
 	select TRACEPOINTS
 	select NOP_TRACER
 
@@ -736,6 +736,7 @@ static void ftrace_trace_stack(struct trace_array *tr,
 				unsigned long flags,
 				int skip, int pc)
 {
+#ifdef CONFIG_STACKTRACE
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
@@ -761,6 +762,7 @@ static void ftrace_trace_stack(struct trace_array *tr,
 
 	save_stack_trace(&trace);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
 }
 
 void __trace_stack(struct trace_array *tr,
@@ -2860,22 +2862,38 @@ static struct file_operations tracing_mark_fops = {
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
-static ssize_t
-tracing_read_long(struct file *filp, char __user *ubuf,
-		  size_t cnt, loff_t *ppos)
+int __weak ftrace_arch_read_dyn_info(char *buf, int size)
 {
-	unsigned long *p = filp->private_data;
-	char buf[64];
-	int r;
-
-	r = sprintf(buf, "%ld\n", *p);
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	return 0;
 }
 
-static struct file_operations tracing_read_long_fops = {
+static ssize_t
+tracing_read_dyn_info(struct file *filp, char __user *ubuf,
+		  size_t cnt, loff_t *ppos)
+{
+	static char ftrace_dyn_info_buffer[1024];
+	static DEFINE_MUTEX(dyn_info_mutex);
+	unsigned long *p = filp->private_data;
+	char *buf = ftrace_dyn_info_buffer;
+	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
+	int r;
+
+	mutex_lock(&dyn_info_mutex);
+	r = sprintf(buf, "%ld ", *p);
+
+	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
+	buf[r++] = '\n';
+
+	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+
+	mutex_unlock(&dyn_info_mutex);
+
+	return r;
+}
+
+static struct file_operations tracing_dyn_info_fops = {
 	.open		= tracing_open_generic,
-	.read		= tracing_read_long,
+	.read		= tracing_read_dyn_info,
 };
 #endif
 
@@ -2984,7 +3002,7 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_DYNAMIC_FTRACE
 	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 				    &ftrace_update_tot_cnt,
-				    &tracing_read_long_fops);
+				    &tracing_dyn_info_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
 			   "'dyn_ftrace_total_info' entry\n");
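ftrace_arch_read_dyn_info() gets a __weak default here so that architectures with no extra state to report (everything but x86 after this merge) need no stub of their own: the linker silently prefers a strong definition when one exists. A standalone illustration of the mechanism, using a hypothetical function name and GCC attribute syntax:

/* weak.c -- built alone this prints "default"; linking in another file
 * that provides a strong read_dyn_info() makes that definition win. */
#include <stdio.h>

int __attribute__((weak)) read_dyn_info(char *buf, int size)
{
	return snprintf(buf, size, "default");
}

int main(void)
{
	char buf[64];

	read_dyn_info(buf, sizeof(buf));
	puts(buf);
	return 0;
}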
@@ -43,6 +43,7 @@ static DEFINE_MUTEX(tracepoints_mutex);
  */
 #define TRACEPOINT_HASH_BITS 6
 #define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
+static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
 
 /*
  * Note about RCU :
@@ -54,40 +55,43 @@ struct tracepoint_entry {
 	struct hlist_node hlist;
 	void **funcs;
 	int refcount;	/* Number of times armed. 0 if disarmed. */
-	struct rcu_head rcu;
-	void *oldptr;
-	unsigned char rcu_pending:1;
 	char name[0];
 };
 
-static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
+struct tp_probes {
+	union {
+		struct rcu_head rcu;
+		struct list_head list;
+	} u;
+	void *probes[0];
+};
 
-static void free_old_closure(struct rcu_head *head)
+static inline void *allocate_probes(int count)
 {
-	struct tracepoint_entry *entry = container_of(head,
-		struct tracepoint_entry, rcu);
-	kfree(entry->oldptr);
-	/* Make sure we free the data before setting the pending flag to 0 */
-	smp_wmb();
-	entry->rcu_pending = 0;
+	struct tp_probes *p = kmalloc(count * sizeof(void *)
+			+ sizeof(struct tp_probes), GFP_KERNEL);
+	return p == NULL ? NULL : p->probes;
 }
 
-static void tracepoint_entry_free_old(struct tracepoint_entry *entry, void *old)
+static void rcu_free_old_probes(struct rcu_head *head)
 {
-	if (!old)
-		return;
-	entry->oldptr = old;
-	entry->rcu_pending = 1;
-	/* write rcu_pending before calling the RCU callback */
-	smp_wmb();
-	call_rcu_sched(&entry->rcu, free_old_closure);
+	kfree(container_of(head, struct tp_probes, u.rcu));
+}
+
+static inline void release_probes(void *old)
+{
+	if (old) {
+		struct tp_probes *tp_probes = container_of(old,
+			struct tp_probes, probes[0]);
+		call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
+	}
 }
 
 static void debug_print_probes(struct tracepoint_entry *entry)
 {
 	int i;
 
-	if (!tracepoint_debug)
+	if (!tracepoint_debug || !entry->funcs)
 		return;
 
 	for (i = 0; entry->funcs[i]; i++)
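allocate_probes() above hands callers the flexible probes[] array rather than the tp_probes header, so existing users keep treating the result as a bare array while release_probes() recovers the header with container_of() to reach the rcu/list union. A small userspace sketch of that layout trick, with malloc()/free() standing in for kmalloc() and the call_rcu_sched()-deferred kfree():

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tp_probes {
	long refcount;		/* stand-in for the kernel's rcu/list union */
	void *probes[];		/* callers only ever see this array */
};

static void *allocate_probes(int count)
{
	struct tp_probes *p = malloc(sizeof(*p) + count * sizeof(void *));
	return p == NULL ? NULL : p->probes;
}

static void release_probes(void *old)
{
	if (old)	/* the kernel defers this free via call_rcu_sched() */
		free(container_of(old, struct tp_probes, probes[0]));
}

int main(void)
{
	void **funcs = allocate_probes(2);

	if (!funcs)
		return 1;
	funcs[0] = "probe";	/* any payload pointer */
	funcs[1] = NULL;	/* terminator, as in the kernel code */
	release_probes(funcs);
	puts("ok");
	return 0;
}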
@@ -111,12 +115,13 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
 			return ERR_PTR(-EEXIST);
 	}
 	/* + 2 : one for new probe, one for NULL func */
-	new = kzalloc((nr_probes + 2) * sizeof(void *), GFP_KERNEL);
+	new = allocate_probes(nr_probes + 2);
 	if (new == NULL)
 		return ERR_PTR(-ENOMEM);
 	if (old)
 		memcpy(new, old, nr_probes * sizeof(void *));
 	new[nr_probes] = probe;
+	new[nr_probes + 1] = NULL;
 	entry->refcount = nr_probes + 1;
 	entry->funcs = new;
 	debug_print_probes(entry);
@@ -132,7 +137,7 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
 	old = entry->funcs;
 
 	if (!old)
-		return NULL;
+		return ERR_PTR(-ENOENT);
 
 	debug_print_probes(entry);
 	/* (N -> M), (N > 1, M >= 0) probes */
@@ -151,13 +156,13 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
 		int j = 0;
 		/* N -> M, (N > 1, M > 0) */
 		/* + 1 for NULL */
-		new = kzalloc((nr_probes - nr_del + 1)
-				* sizeof(void *), GFP_KERNEL);
+		new = allocate_probes(nr_probes - nr_del + 1);
 		if (new == NULL)
 			return ERR_PTR(-ENOMEM);
 		for (i = 0; old[i]; i++)
 			if ((probe && old[i] != probe))
 				new[j++] = old[i];
+		new[nr_probes - nr_del] = NULL;
 		entry->refcount = nr_probes - nr_del;
 		entry->funcs = new;
 	}
@@ -215,7 +220,6 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
 	memcpy(&e->name[0], name, name_len);
 	e->funcs = NULL;
 	e->refcount = 0;
-	e->rcu_pending = 0;
 	hlist_add_head(&e->hlist, head);
 	return e;
 }
@@ -224,32 +228,10 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
  * Remove the tracepoint from the tracepoint hash table. Must be called with
  * mutex_lock held.
  */
-static int remove_tracepoint(const char *name)
+static inline void remove_tracepoint(struct tracepoint_entry *e)
 {
-	struct hlist_head *head;
-	struct hlist_node *node;
-	struct tracepoint_entry *e;
-	int found = 0;
-	size_t len = strlen(name) + 1;
-	u32 hash = jhash(name, len-1, 0);
-
-	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-	hlist_for_each_entry(e, node, head, hlist) {
-		if (!strcmp(name, e->name)) {
-			found = 1;
-			break;
-		}
-	}
-	if (!found)
-		return -ENOENT;
-	if (e->refcount)
-		return -EBUSY;
 	hlist_del(&e->hlist);
-	/* Make sure the call_rcu_sched has been executed */
-	if (e->rcu_pending)
-		rcu_barrier_sched();
 	kfree(e);
-	return 0;
 }
 
 /*
@@ -320,6 +302,23 @@ static void tracepoint_update_probes(void)
 	module_update_tracepoints();
 }
 
+static void *tracepoint_add_probe(const char *name, void *probe)
+{
+	struct tracepoint_entry *entry;
+	void *old;
+
+	entry = get_tracepoint(name);
+	if (!entry) {
+		entry = add_tracepoint(name);
+		if (IS_ERR(entry))
+			return entry;
+	}
+	old = tracepoint_entry_add_probe(entry, probe);
+	if (IS_ERR(old) && !entry->refcount)
+		remove_tracepoint(entry);
+	return old;
+}
+
 /**
  * tracepoint_probe_register - Connect a probe to a tracepoint
  * @name: tracepoint name
@@ -330,44 +329,36 @@ static void tracepoint_update_probes(void)
  */
 int tracepoint_probe_register(const char *name, void *probe)
 {
-	struct tracepoint_entry *entry;
-	int ret = 0;
 	void *old;
 
 	mutex_lock(&tracepoints_mutex);
-	entry = get_tracepoint(name);
-	if (!entry) {
-		entry = add_tracepoint(name);
-		if (IS_ERR(entry)) {
-			ret = PTR_ERR(entry);
-			goto end;
-		}
-	}
-	/*
-	 * If we detect that a call_rcu_sched is pending for this tracepoint,
-	 * make sure it's executed now.
-	 */
-	if (entry->rcu_pending)
-		rcu_barrier_sched();
-	old = tracepoint_entry_add_probe(entry, probe);
-	if (IS_ERR(old)) {
-		ret = PTR_ERR(old);
-		goto end;
-	}
+	old = tracepoint_add_probe(name, probe);
 	mutex_unlock(&tracepoints_mutex);
+	if (IS_ERR(old))
+		return PTR_ERR(old);
 
 	tracepoint_update_probes();		/* may update entry */
-	mutex_lock(&tracepoints_mutex);
-	entry = get_tracepoint(name);
-	WARN_ON(!entry);
-	if (entry->rcu_pending)
-		rcu_barrier_sched();
-	tracepoint_entry_free_old(entry, old);
-end:
-	mutex_unlock(&tracepoints_mutex);
-	return ret;
+	release_probes(old);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(tracepoint_probe_register);
 
+static void *tracepoint_remove_probe(const char *name, void *probe)
+{
+	struct tracepoint_entry *entry;
+	void *old;
+
+	entry = get_tracepoint(name);
+	if (!entry)
+		return ERR_PTR(-ENOENT);
+	old = tracepoint_entry_remove_probe(entry, probe);
+	if (IS_ERR(old))
+		return old;
+	if (!entry->refcount)
+		remove_tracepoint(entry);
+	return old;
+}
+
 /**
  * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
  * @name: tracepoint name
@@ -380,39 +371,105 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register);
  */
 int tracepoint_probe_unregister(const char *name, void *probe)
 {
-	struct tracepoint_entry *entry;
 	void *old;
-	int ret = -ENOENT;
 
 	mutex_lock(&tracepoints_mutex);
-	entry = get_tracepoint(name);
-	if (!entry)
-		goto end;
-	if (entry->rcu_pending)
-		rcu_barrier_sched();
-	old = tracepoint_entry_remove_probe(entry, probe);
-	if (!old) {
-		printk(KERN_WARNING "Warning: Trying to unregister a probe"
-				"that doesn't exist\n");
-		goto end;
-	}
+	old = tracepoint_remove_probe(name, probe);
 	mutex_unlock(&tracepoints_mutex);
+	if (IS_ERR(old))
+		return PTR_ERR(old);
 
 	tracepoint_update_probes();		/* may update entry */
-	mutex_lock(&tracepoints_mutex);
-	entry = get_tracepoint(name);
-	if (!entry)
-		goto end;
-	if (entry->rcu_pending)
-		rcu_barrier_sched();
-	tracepoint_entry_free_old(entry, old);
-	remove_tracepoint(name);	/* Ignore busy error message */
-	ret = 0;
-end:
-	mutex_unlock(&tracepoints_mutex);
-	return ret;
+	release_probes(old);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
 
+static LIST_HEAD(old_probes);
+static int need_update;
+
+static void tracepoint_add_old_probes(void *old)
+{
+	need_update = 1;
+	if (old) {
+		struct tp_probes *tp_probes = container_of(old,
+			struct tp_probes, probes[0]);
+		list_add(&tp_probes->u.list, &old_probes);
+	}
+}
+
+/**
+ * tracepoint_probe_register_noupdate - register a probe but not connect
+ * @name: tracepoint name
+ * @probe: probe handler
+ *
+ * caller must call tracepoint_probe_update_all()
+ */
+int tracepoint_probe_register_noupdate(const char *name, void *probe)
+{
+	void *old;
+
+	mutex_lock(&tracepoints_mutex);
+	old = tracepoint_add_probe(name, probe);
+	if (IS_ERR(old)) {
+		mutex_unlock(&tracepoints_mutex);
+		return PTR_ERR(old);
+	}
+	tracepoint_add_old_probes(old);
+	mutex_unlock(&tracepoints_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
+
+/**
+ * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect
+ * @name: tracepoint name
+ * @probe: probe function pointer
+ *
+ * caller must call tracepoint_probe_update_all()
+ */
+int tracepoint_probe_unregister_noupdate(const char *name, void *probe)
+{
+	void *old;
+
+	mutex_lock(&tracepoints_mutex);
+	old = tracepoint_remove_probe(name, probe);
+	if (IS_ERR(old)) {
+		mutex_unlock(&tracepoints_mutex);
+		return PTR_ERR(old);
+	}
+	tracepoint_add_old_probes(old);
+	mutex_unlock(&tracepoints_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
+
+/**
+ * tracepoint_probe_update_all - update tracepoints
+ */
+void tracepoint_probe_update_all(void)
+{
+	LIST_HEAD(release_probes);
+	struct tp_probes *pos, *next;
+
+	mutex_lock(&tracepoints_mutex);
+	if (!need_update) {
+		mutex_unlock(&tracepoints_mutex);
+		return;
+	}
+	if (!list_empty(&old_probes))
+		list_replace_init(&old_probes, &release_probes);
+	need_update = 0;
+	mutex_unlock(&tracepoints_mutex);
+
+	tracepoint_update_probes();
+	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
+		list_del(&pos->u.list);
+		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
+	}
+}
+EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
+
 /**
  * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
  * @tracepoint: current tracepoints (in), next tracepoint (out)