Mirror of https://github.com/torvalds/linux.git, synced 2024-11-11 14:42:24 +00:00
perfcounters: add task migrations counter
Impact: add new feature, new sw counter

Add a counter that counts the number of cross-CPU migrations a
task is suffering.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 5d6a27d8a0
commit 6c594c21fc
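For context, the software counter introduced here survives in today's kernels as PERF_COUNT_SW_CPU_MIGRATIONS in the perf_event API. A minimal sketch of reading the migration count for the calling task with the modern interface (illustrative only; the 2008-era syscall and structure names in the diff below differ):

/* Sketch: read the CPU-migrations software counter for the current
 * task using today's perf_event API, the descendant of the interface
 * this commit extends. Not the 2008-era syscall shown in the diff. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_MIGRATIONS;

	/* pid == 0, cpu == -1: follow the calling task across all CPUs */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);	/* ... the workload being measured ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cpu-migrations: %lld\n", count);
	close(fd);
	return 0;
}

The same per-task count is what `perf stat -e migrations` reports today.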
include/linux/perf_counter.h
@@ -42,6 +42,8 @@ enum hw_event_types {
 	PERF_COUNT_BRANCH_INSTRUCTIONS	=  4,
 	PERF_COUNT_BRANCH_MISSES	=  5,
 
+	PERF_HW_EVENTS_MAX		=  6,
+
 	/*
 	 * Special "software" counters provided by the kernel, even if
 	 * the hardware does not support performance counters. These
@@ -50,11 +52,11 @@ enum hw_event_types {
 	 */
 	PERF_COUNT_CPU_CLOCK		= -1,
 	PERF_COUNT_TASK_CLOCK		= -2,
-	/*
-	 * Future software events:
-	 */
 	PERF_COUNT_PAGE_FAULTS		= -3,
 	PERF_COUNT_CONTEXT_SWITCHES	= -4,
+	PERF_COUNT_CPU_MIGRATIONS	= -5,
+
+	PERF_SW_EVENTS_MIN		= -6,
 };
 
 /*
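Note the convention these hunks formalize: hardware event types count up from 0 and software event types count down from -1, so the two new sentinels bound the valid range from both ends. A hypothetical range check this enables (assumed helper, not part of this commit):

/* Hypothetical helper, not from this commit: with the two sentinels,
 * a raw event type can be range-checked from both ends. */
static inline int perf_event_type_valid(s64 type)
{
	return type > PERF_SW_EVENTS_MIN && type < PERF_HW_EVENTS_MAX;
}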
include/linux/sched.h
@@ -1014,6 +1014,8 @@ struct sched_entity {
 	u64			last_wakeup;
 	u64			avg_overlap;
 
+	u64			nr_migrations;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1029,7 +1031,6 @@ struct sched_entity {
 	u64			exec_max;
 	u64			slice_max;
 
-	u64			nr_migrations;
 	u64			nr_migrations_cold;
 	u64			nr_failed_migrations_affine;
 	u64			nr_failed_migrations_running;
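Taken together, the two sched.h hunks move nr_migrations out of the CONFIG_SCHEDSTATS-only section of struct sched_entity, so the field exists, and is kept up to date, on every configuration rather than only on schedstats-enabled kernels.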
kernel/perf_counter.c
@@ -936,6 +936,52 @@ static const struct hw_perf_counter_ops perf_ops_context_switches = {
 	.hw_perf_counter_read	= context_switches_perf_counter_read,
 };
 
+static inline u64 get_cpu_migrations(void)
+{
+	return current->se.nr_migrations;
+}
+
+static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
+{
+	u64 prev, now;
+	s64 delta;
+
+	prev = atomic64_read(&counter->hw.prev_count);
+	now = get_cpu_migrations();
+
+	atomic64_set(&counter->hw.prev_count, now);
+
+	delta = now - prev;
+	if (WARN_ON_ONCE(delta < 0))
+		delta = 0;
+
+	atomic64_add(delta, &counter->count);
+}
+
+static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
+{
+	cpu_migrations_perf_counter_update(counter);
+}
+
+static void cpu_migrations_perf_counter_enable(struct perf_counter *counter)
+{
+	/*
+	 * se.nr_migrations is a per-task value already,
+	 * so we dont have to clear it on switch-in.
+	 */
+}
+
+static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
+{
+	cpu_migrations_perf_counter_update(counter);
+}
+
+static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
+	.hw_perf_counter_enable		= cpu_migrations_perf_counter_enable,
+	.hw_perf_counter_disable	= cpu_migrations_perf_counter_disable,
+	.hw_perf_counter_read		= cpu_migrations_perf_counter_read,
+};
+
 static const struct hw_perf_counter_ops *
 sw_perf_counter_init(struct perf_counter *counter)
 {
@@ -951,6 +997,9 @@ sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_CONTEXT_SWITCHES:
 		hw_ops = &perf_ops_context_switches;
 		break;
+	case PERF_COUNT_CPU_MIGRATIONS:
+		hw_ops = &perf_ops_cpu_migrations;
+		break;
 	default:
 		break;
 	}
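A note on the design just added: cpu_migrations_perf_counter_update() is pull-based. Rather than hooking every migration event, it samples the monotonically increasing per-task total (se.nr_migrations) on read and disable, and folds the growth since the last sample into the counter. The same idiom in isolation, with hypothetical names (a sketch of the pattern, not kernel code):

#include <stdatomic.h>
#include <stdint.h>

struct pull_counter {
	_Atomic uint64_t prev;	/* source value at the last update */
	_Atomic uint64_t count;	/* accumulated total seen by readers */
};

/* Fold the growth of a monotonic source into the accumulator. */
static void pull_counter_update(struct pull_counter *c, uint64_t now)
{
	uint64_t prev = atomic_exchange(&c->prev, now);
	int64_t delta = (int64_t)(now - prev);

	if (delta < 0)	/* defensive clamp, mirroring the WARN_ON_ONCE above */
		delta = 0;
	atomic_fetch_add(&c->count, (uint64_t)delta);
}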
kernel/sched.c
@@ -1852,12 +1852,14 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	if (p->se.sleep_start)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+#endif
 	if (old_cpu != new_cpu) {
-		schedstat_inc(p, se.nr_migrations);
+		p->se.nr_migrations++;
+#ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
-	}
 #endif
+	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
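The #ifdef shuffle above is the point of the hunk: the unconditional p->se.nr_migrations++ replaces the schedstats-only schedstat_inc(), so the new perf counter keeps working when CONFIG_SCHEDSTATS is off, while the task_hot() bookkeeping stays behind the #ifdef.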
@@ -2375,6 +2377,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
+	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
 