Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "This includes a fix for a crash if certain special addresses are
  kprobed, plus does a rename of two Kconfig variables that were a
  minor misnomer"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Rename CONFIG_[UK]PROBE_EVENT to CONFIG_[UK]PROBE_EVENTS
  kprobes/x86: Fix kernel panic when certain exception-handling addresses are probed
commit c3abcabe81
@@ -12,7 +12,7 @@ kprobes can probe (this means, all functions body except for __kprobes
 functions). Unlike the Tracepoint based event, this can be added and removed
 dynamically, on the fly.
 
-To enable this feature, build your kernel with CONFIG_KPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y.
 
 Similar to the events tracer, this doesn't need to be activated via
 current_tracer. Instead of that, add probe points via
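(Not part of the patch: a minimal userspace sketch of the interface the documentation hunk above refers to — once CONFIG_KPROBE_EVENTS=y, probe points are added by appending a definition to the kprobe_events file. The tracefs path, the probe name "myprobe" and the symbol do_sys_open are illustrative; on many setups the file is under /sys/kernel/debug/tracing instead.)

/* sketch: define a kprobe event by writing to kprobe_events */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* illustrative probe name and symbol; any probeable kernel symbol works */
	const char *def = "p:myprobe do_sys_open\n";
	int fd = open("/sys/kernel/tracing/kprobe_events", O_WRONLY | O_APPEND);

	if (fd < 0) {
		perror("open kprobe_events");
		return 1;
	}
	if (write(fd, def, strlen(def)) != (ssize_t)strlen(def))
		perror("write probe definition");
	close(fd);
	return 0;
}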
@@ -7,7 +7,7 @@
 Overview
 --------
 Uprobe based trace events are similar to kprobe based trace events.
-To enable this feature, build your kernel with CONFIG_UPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y.
 
 Similar to the kprobe-event tracer, this doesn't need to be activated via
 current_tracer. Instead of that, add probe points via
@@ -609,7 +609,7 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
@@ -560,7 +560,7 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
@@ -558,7 +558,7 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
@@ -179,7 +179,7 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -67,7 +67,7 @@
 #endif
 
 /* Ensure if the instruction can be boostable */
-extern int can_boost(kprobe_opcode_t *instruction);
+extern int can_boost(kprobe_opcode_t *instruction, void *addr);
 /* Recover instruction if given address is probed */
 extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
 					 unsigned long addr);
@@ -167,12 +167,12 @@ NOKPROBE_SYMBOL(skip_prefixes);
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
  */
-int can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes, void *addr)
 {
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
 
-	if (search_exception_tables((unsigned long)opcodes))
+	if (search_exception_tables((unsigned long)addr))
 		return 0;	/* Page fault may occur on this address. */
 
 retry:
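(Not part of the diff: a minimal sketch of the reasoning behind the hunk above. The opcodes being inspected are an out-of-line copy in a kprobe instruction slot, so looking that buffer up in the exception tables can never match; the check has to use the original probe address, otherwise probing an exception-handling address gets "boosted" and the kernel panics. Function and parameter names below are illustrative, header locations may vary by kernel version.)

#include <linux/kprobes.h>	/* kprobe_opcode_t */
#include <linux/extable.h>	/* search_exception_tables() */

/* illustrative only -- mirrors the intent of the fixed can_boost() */
static int can_boost_sketch(kprobe_opcode_t *copied_insn, void *orig_addr)
{
	/*
	 * The exception tables are keyed by the address where a fault may
	 * legitimately occur, i.e. the original kernel text address, not
	 * the out-of-line copy in the insn slot.
	 */
	if (search_exception_tables((unsigned long)orig_addr))
		return 0;	/* a page fault may be taken here: do not boost */

	/* ... opcode inspection continues on copied_insn ... */
	return 1;
}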
@@ -417,7 +417,7 @@ static int arch_copy_kprobe(struct kprobe *p)
 	 * __copy_instruction can modify the displacement of the instruction,
 	 * but it doesn't affect boostable check.
 	 */
-	if (can_boost(p->ainsn.insn))
+	if (can_boost(p->ainsn.insn, p->addr))
 		p->ainsn.boostable = 0;
 	else
 		p->ainsn.boostable = -1;
@@ -178,7 +178,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
 
 	while (len < RELATIVEJUMP_SIZE) {
 		ret = __copy_instruction(dest + len, src + len);
-		if (!ret || !can_boost(dest + len))
+		if (!ret || !can_boost(dest + len, src + len))
 			return -EINVAL;
 		len += ret;
 	}
@@ -429,7 +429,7 @@ config BLK_DEV_IO_TRACE
 
 	  If unsure, say N.
 
-config KPROBE_EVENT
+config KPROBE_EVENTS
 	depends on KPROBES
 	depends on HAVE_REGS_AND_STACK_ACCESS_API
 	bool "Enable kprobes-based dynamic events"
@@ -447,7 +447,7 @@ config KPROBE_EVENT
 	  This option is also required by perf-probe subcommand of perf tools.
 	  If you want to use perf tools, this option is strongly recommended.
 
-config UPROBE_EVENT
+config UPROBE_EVENTS
 	bool "Enable uprobes-based dynamic events"
 	depends on ARCH_SUPPORTS_UPROBES
 	depends on MMU
@@ -466,7 +466,7 @@ config UPROBE_EVENT
 
 config BPF_EVENTS
 	depends on BPF_SYSCALL
-	depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
+	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
 	bool
 	default y
 	help
@@ -57,7 +57,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
 obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
 obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
-obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
+obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o
 obj-$(CONFIG_TRACEPOINTS) += power-traces.o
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
@@ -66,7 +66,7 @@ ifeq ($(CONFIG_TRACING),y)
 obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 endif
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
-obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
+obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
 
@@ -4341,22 +4341,22 @@ static const char readme_msg[] =
 	"\t\t\t traces\n"
 #endif
 #endif /* CONFIG_STACK_TRACER */
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
 	" kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
 	"\t\t\t Write into this file to define/undefine new trace events.\n"
 #endif
-#ifdef CONFIG_UPROBE_EVENT
+#ifdef CONFIG_UPROBE_EVENTS
 	" uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
 	"\t\t\t Write into this file to define/undefine new trace events.\n"
 #endif
-#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
 	"\t accepts: event-definitions (one definition per line)\n"
 	"\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
 	"\t -:[<group>/]<event>\n"
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
 	"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
 #endif
-#ifdef CONFIG_UPROBE_EVENT
+#ifdef CONFIG_UPROBE_EVENTS
 	"\t place: <path>:<offset>\n"
 #endif
 	"\t args: <name>=fetcharg[:type]\n"
@@ -248,7 +248,7 @@ ASSIGN_FETCH_FUNC(file_offset, ftype), \
 #define FETCH_TYPE_STRING	0
 #define FETCH_TYPE_STRSIZE	1
 
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
 struct symbol_cache;
 unsigned long update_symbol_cache(struct symbol_cache *sc);
 void free_symbol_cache(struct symbol_cache *sc);
@@ -278,7 +278,7 @@ alloc_symbol_cache(const char *sym, long offset)
 {
 	return NULL;
 }
-#endif /* CONFIG_KPROBE_EVENT */
+#endif /* CONFIG_KPROBE_EVENTS */
 
 struct probe_arg {
 	struct fetch_param	fetch;