Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Mostly tooling fixes, plus two uncore-PMU fixes, an uprobes fix, a
  perf-cgroups fix and an AUX events fix"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/uncore: Add enable_box for client MSR uncore
  perf/x86/intel/uncore: Fix uncore num_counters
  uprobes/x86: Fix RIP-relative handling of EVEX-encoded instructions
  perf/core: Set cgroup in CPU contexts for new cgroup events
  perf/core: Fix sideband list-iteration vs. event ordering NULL pointer deference crash
  perf probe ppc64le: Fix probe location when using DWARF
  perf probe: Add function to post process kernel trace events
  tools: Sync cpufeatures headers with the kernel
  toops: Sync tools/include/uapi/linux/bpf.h with the kernel
  tools: Sync cpufeatures.h and vmx.h with the kernel
  perf probe: Support signedness casting
  perf stat: Avoid skew when reading events
  perf probe: Fix module name matching
  perf probe: Adjust map->reloc offset when finding kernel symbol from map
  perf hists: Trim libtraceevent trace_seq buffers
  perf script: Add 'bpf-output' field to usage message

diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -843,6 +843,32 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 		}
 	}
 }
+
+/*
+ * Update cpuctx->cgrp so that it is set when first cgroup event is added and
+ * cleared when last cgroup event is removed.
+ */
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+			 struct perf_event_context *ctx, bool add)
+{
+	struct perf_cpu_context *cpuctx;
+
+	if (!is_cgroup_event(event))
+		return;
+
+	if (add && ctx->nr_cgroups++)
+		return;
+	else if (!add && --ctx->nr_cgroups)
+		return;
+	/*
+	 * Because cgroup events are always per-cpu events,
+	 * this will always be called from the right CPU.
+	 */
+	cpuctx = __get_cpu_context(ctx);
+	cpuctx->cgrp = add ? event->cgrp : NULL;
+}
+
 #else /* !CONFIG_CGROUP_PERF */
 
 static inline bool
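
(Aside, not part of the commit: list_update_cgroup_event() keys off the counter transition, post-increment on add and pre-decrement on remove, so that cpuctx->cgrp is written only when the first cgroup event is added or the last one is removed. A minimal userspace sketch of that first/last pattern, using hypothetical names:)

#include <stdbool.h>
#include <stdio.h>

static int nr_items;			/* plays the role of ctx->nr_cgroups */
static const char *current_tag;		/* plays the role of cpuctx->cgrp */

static void update_tag(const char *tag, bool add)
{
	if (add && nr_items++)		/* counter was already non-zero: not the first add */
		return;
	else if (!add && --nr_items)	/* counter still non-zero: not the last remove */
		return;

	/* Only the 0 -> 1 and 1 -> 0 transitions reach this point. */
	current_tag = add ? tag : NULL;
	printf("tag: %s\n", current_tag ? current_tag : "(none)");
}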
@@ -920,6 +946,13 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 			 struct perf_event_context *ctx)
 {
 }
+
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+			 struct perf_event_context *ctx, bool add)
+{
+}
+
 #endif
 
 /*
@@ -1392,6 +1425,7 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
+
 	lockdep_assert_held(&ctx->lock);
 
 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1412,8 +1446,7 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 		list_add_tail(&event->group_entry, list);
 	}
 
-	if (is_cgroup_event(event))
-		ctx->nr_cgroups++;
+	list_update_cgroup_event(event, ctx, true);
 
 	list_add_rcu(&event->event_entry, &ctx->event_list);
 	ctx->nr_events++;
@@ -1581,8 +1614,6 @@ static void perf_group_attach(struct perf_event *event)
 static void
 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-	struct perf_cpu_context *cpuctx;
-
 	WARN_ON_ONCE(event->ctx != ctx);
 	lockdep_assert_held(&ctx->lock);
 
@@ -1594,20 +1625,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
-	if (is_cgroup_event(event)) {
-		ctx->nr_cgroups--;
-		/*
-		 * Because cgroup events are always per-cpu events, this will
-		 * always be called from the right CPU.
-		 */
-		cpuctx = __get_cpu_context(ctx);
-		/*
-		 * If there are no more cgroup events then clear cgrp to avoid
-		 * stale pointer in update_cgrp_time_from_cpuctx().
-		 */
-		if (!ctx->nr_cgroups)
-			cpuctx->cgrp = NULL;
-	}
+	list_update_cgroup_event(event, ctx, false);
 
 	ctx->nr_events--;
 	if (event->attr.inherit_stat)
@@ -1716,8 +1734,8 @@ static inline int pmu_filter_match(struct perf_event *event)
 static inline int
 event_filter_match(struct perf_event *event)
 {
-	return (event->cpu == -1 || event->cpu == smp_processor_id())
-	    && perf_cgroup_match(event) && pmu_filter_match(event);
+	return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
+	       perf_cgroup_match(event) && pmu_filter_match(event);
 }
 
 static void
@@ -1737,8 +1755,8 @@ event_sched_out(struct perf_event *event,
 	 * maintained, otherwise bogus information is return
 	 * via read() for time_enabled, time_running:
 	 */
-	if (event->state == PERF_EVENT_STATE_INACTIVE
-	    && !event_filter_match(event)) {
+	if (event->state == PERF_EVENT_STATE_INACTIVE &&
+	    !event_filter_match(event)) {
 		delta = tstamp - event->tstamp_stopped;
 		event->tstamp_running += delta;
 		event->tstamp_stopped = tstamp;
@@ -2236,10 +2254,15 @@ perf_install_in_context(struct perf_event_context *ctx,
 
 	lockdep_assert_held(&ctx->mutex);
 
-	event->ctx = ctx;
 	if (event->cpu != -1)
 		event->cpu = cpu;
 
+	/*
+	 * Ensures that if we can observe event->ctx, both the event and ctx
+	 * will be 'complete'. See perf_iterate_sb_cpu().
+	 */
+	smp_store_release(&event->ctx, ctx);
+
 	if (!task) {
 		cpu_function_call(cpu, __perf_install_in_context, event);
 		return;
@@ -5969,6 +5992,14 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
 	struct perf_event *event;
 
 	list_for_each_entry_rcu(event, &pel->list, sb_list) {
+		/*
+		 * Skip events that are not fully formed yet; ensure that
+		 * if we observe event->ctx, both event and ctx will be
+		 * complete enough. See perf_install_in_context().
+		 */
+		if (!smp_load_acquire(&event->ctx))
+			continue;
+
 		if (event->state < PERF_EVENT_STATE_INACTIVE)
 			continue;
 		if (!event_filter_match(event))
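
(Aside, not part of the commit: the smp_store_release() added in perf_install_in_context() pairs with the smp_load_acquire() added here. This is the standard pointer-publication idiom: the release store orders the object's initialization before the pointer becomes visible, and the acquire load guarantees that a reader who sees the pointer also sees the initialized object. A userspace analogue using C11 atomics, with hypothetical names; the kernel primitives are related to but not interchangeable with these:)

#include <stdatomic.h>
#include <stddef.h>

struct ctx { int ready; };

static _Atomic(struct ctx *) published;

/* Writer side (cf. perf_install_in_context): finish initializing the
 * object, then publish the pointer with release ordering. */
static void publish_ctx(struct ctx *c)
{
	c->ready = 1;
	atomic_store_explicit(&published, c, memory_order_release);
}

/* Reader side (cf. perf_iterate_sb_cpu): an acquire load either sees
 * NULL, in which case the reader skips the not-yet-complete object,
 * or a pointer whose pointee is guaranteed to be initialized. */
static int read_ctx(void)
{
	struct ctx *c = atomic_load_explicit(&published, memory_order_acquire);

	if (!c)
		return -1;	/* not published yet: skip, like the list walk does */
	return c->ready;	/* guaranteed to observe 1 */
}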