mirror of https://github.com/torvalds/linux.git
perf: Fix event_function_call() locking
All the event_function/@func call context already uses perf_ctx_lock()
except for the !ctx->is_active case. Make it all consistent.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240807115550.138301094@infradead.org
This commit is contained in: commit 558abc7e3f (parent 9a32bd9901)
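For context, the helpers this change switches to nest the per-CPU context lock around the task context lock. A rough sketch of their shape as defined elsewhere in kernel/events/core.c (paraphrased for illustration, not part of this diff):

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	/* Per-CPU context lock first, then the task context lock. */
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	/* Release in reverse order. */
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}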
@@ -263,6 +263,7 @@ unlock:
 static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
+	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
 	struct event_function_struct efs = {
 		.event = event,
@@ -291,22 +292,22 @@ again:
 	if (!task_function_call(task, event_function, &efs))
 		return;
 
-	raw_spin_lock_irq(&ctx->lock);
+	perf_ctx_lock(cpuctx, ctx);
 	/*
 	 * Reload the task pointer, it might have been changed by
 	 * a concurrent perf_event_context_sched_out().
 	 */
 	task = ctx->task;
 	if (task == TASK_TOMBSTONE) {
-		raw_spin_unlock_irq(&ctx->lock);
+		perf_ctx_unlock(cpuctx, ctx);
 		return;
 	}
 	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
+		perf_ctx_unlock(cpuctx, ctx);
 		goto again;
 	}
 	func(event, NULL, ctx, data);
-	raw_spin_unlock_irq(&ctx->lock);
+	perf_ctx_unlock(cpuctx, ctx);
 }
 
 /*