perf/core: Collapse more IPI loops
This patch collapses the two 'hard' cases, which are perf_event_{dis,en}able(). I cannot seem to convince myself the current code is correct.

Starting with perf_event_disable(): we don't strictly need to test for event->state == ACTIVE; ctx->is_active is enough. If the event is not scheduled while the ctx is, __perf_event_disable() still does the right thing. It's a little less efficient to IPI in that case, but over-all simpler.

For perf_event_enable() the same goes, but I think that one is actually broken in its current form. The current condition is: ctx->is_active && event->state == OFF, which means it doesn't do anything when !ctx->is_active && event->state == OFF. This is wrong; it should still mark the event INACTIVE in that case, otherwise we'll still not try to schedule the event once the context becomes active again.

This patch implements the two functions using the new event_function_call() and does away with the tricky event->state tests.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Alexander Shishkin <alexander.shishkin@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 7b648018f6
parent 9cc96b0a21
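The helper this patch relies on, event_function_call(), is not shown in this diff (it was added by an earlier patch in the series). From the call sites below -- event_function_call(event, __perf_event_disable, ___perf_event_disable, event) -- and from the open-coded loops being removed, its job is to run the 'active' callback via IPI while the context is scheduled in, and otherwise run the 'inactive' callback under ctx->lock. A minimal sketch of that shape, built only from the primitives visible in the removed code (cpu_function_call(), task_function_call(), ctx->lock); the body below is illustrative, not the actual kernel implementation:

/*
 * Illustrative sketch only -- shape inferred from the call sites in this
 * diff, not the kernel's real implementation.  'active' runs via IPI on
 * the CPU the event is on; 'inactive' runs under ctx->lock once the
 * context is known not to be scheduled in.
 */
static void event_function_call(struct perf_event *event,
                                int (*active)(void *),
                                void (*inactive)(void *),
                                void *data)
{
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;

        if (!task) {
                /* CPU context: IPI the CPU the event is bound to. */
                cpu_function_call(event->cpu, active, data);
                return;
        }

again:
        /* Task context: try to run 'active' on the task's current CPU. */
        if (!task_function_call(task, active, data))
                return;

        raw_spin_lock_irq(&ctx->lock);
        if (ctx->is_active) {
                /*
                 * The context got (re)scheduled between the failed IPI and
                 * taking the lock; reload the task and retry the cross-call.
                 */
                task = ctx->task;
                raw_spin_unlock_irq(&ctx->lock);
                goto again;
        }
        /* Context is not active: safe to mutate event state directly. */
        inactive(data);
        raw_spin_unlock_irq(&ctx->lock);
}

With that helper centralizing the retry-on-race loop, both _perf_event_disable() and _perf_event_enable() reduce to a quick state check plus a single event_function_call() invocation, as the hunks below show.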
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1766,6 +1766,20 @@ int __perf_event_disable(void *info)
         return 0;
 }

+void ___perf_event_disable(void *info)
+{
+        struct perf_event *event = info;
+
+        /*
+         * Since we have the lock this context can't be scheduled
+         * in, so we can change the state safely.
+         */
+        if (event->state == PERF_EVENT_STATE_INACTIVE) {
+                update_group_times(event);
+                event->state = PERF_EVENT_STATE_OFF;
+        }
+}
+
 /*
  * Disable a event.
  *
@@ -1782,43 +1796,16 @@ int __perf_event_disable(void *info)
 static void _perf_event_disable(struct perf_event *event)
 {
         struct perf_event_context *ctx = event->ctx;
-        struct task_struct *task = ctx->task;
-
-        if (!task) {
-                /*
-                 * Disable the event on the cpu that it's on
-                 */
-                cpu_function_call(event->cpu, __perf_event_disable, event);
-                return;
-        }

-retry:
-        if (!task_function_call(task, __perf_event_disable, event))
-                return;
-
         raw_spin_lock_irq(&ctx->lock);
-        /*
-         * If the event is still active, we need to retry the cross-call.
-         */
-        if (event->state == PERF_EVENT_STATE_ACTIVE) {
+        if (event->state <= PERF_EVENT_STATE_OFF) {
                 raw_spin_unlock_irq(&ctx->lock);
-                /*
-                 * Reload the task pointer, it might have been changed by
-                 * a concurrent perf_event_context_sched_out().
-                 */
-                task = ctx->task;
-                goto retry;
-        }
-
-        /*
-         * Since we have the lock this context can't be scheduled
-         * in, so we can change the state safely.
-         */
-        if (event->state == PERF_EVENT_STATE_INACTIVE) {
-                update_group_times(event);
-                event->state = PERF_EVENT_STATE_OFF;
+                return;
         }
         raw_spin_unlock_irq(&ctx->lock);
+
+        event_function_call(event, __perf_event_disable,
+                            ___perf_event_disable, event);
 }

 /*
@@ -2269,6 +2256,11 @@ unlock:
         return 0;
 }

+void ___perf_event_enable(void *info)
+{
+        __perf_event_mark_enabled((struct perf_event *)info);
+}
+
 /*
  * Enable a event.
  *
@@ -2281,58 +2273,26 @@ unlock:
 static void _perf_event_enable(struct perf_event *event)
 {
         struct perf_event_context *ctx = event->ctx;
-        struct task_struct *task = ctx->task;
-
-        if (!task) {
-                /*
-                 * Enable the event on the cpu that it's on
-                 */
-                cpu_function_call(event->cpu, __perf_event_enable, event);
-                return;
-        }

         raw_spin_lock_irq(&ctx->lock);
-        if (event->state >= PERF_EVENT_STATE_INACTIVE)
-                goto out;
+        if (event->state >= PERF_EVENT_STATE_INACTIVE) {
+                raw_spin_unlock_irq(&ctx->lock);
+                return;
+        }

         /*
          * If the event is in error state, clear that first.
-         * That way, if we see the event in error state below, we
-         * know that it has gone back into error state, as distinct
-         * from the task having been scheduled away before the
-         * cross-call arrived.
+         *
+         * That way, if we see the event in error state below, we know that it
+         * has gone back into error state, as distinct from the task having
+         * been scheduled away before the cross-call arrived.
          */
         if (event->state == PERF_EVENT_STATE_ERROR)
                 event->state = PERF_EVENT_STATE_OFF;
-
-retry:
-        if (!ctx->is_active) {
-                __perf_event_mark_enabled(event);
-                goto out;
-        }
-
         raw_spin_unlock_irq(&ctx->lock);
-
-        if (!task_function_call(task, __perf_event_enable, event))
-                return;
-
-        raw_spin_lock_irq(&ctx->lock);
-
-        /*
-         * If the context is active and the event is still off,
-         * we need to retry the cross-call.
-         */
-        if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
-                /*
-                 * task could have been flipped by a concurrent
-                 * perf_event_context_sched_out()
-                 */
-                task = ctx->task;
-                goto retry;
-        }
-
-out:
-        raw_spin_unlock_irq(&ctx->lock);
+
+        event_function_call(event, __perf_event_enable,
+                            ___perf_event_enable, event);
 }

 /*
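For reference, reassembling the context and '+' lines of the hunks above, _perf_event_disable() reads roughly as follows after this patch; _perf_event_enable() ends up with the same two-step shape (early-out state check, then event_function_call()):

static void _perf_event_disable(struct perf_event *event)
{
        struct perf_event_context *ctx = event->ctx;

        raw_spin_lock_irq(&ctx->lock);
        if (event->state <= PERF_EVENT_STATE_OFF) {
                /* Already at (or below) OFF: nothing to do. */
                raw_spin_unlock_irq(&ctx->lock);
                return;
        }
        raw_spin_unlock_irq(&ctx->lock);

        /* Let the common helper pick IPI vs. direct state update. */
        event_function_call(event, __perf_event_disable,
                            ___perf_event_disable, event);
}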