arm64: perf: Add cap_user_time_short

This completes the ARM64 cap_user_time support.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Link: https://lore.kernel.org/r/20200716051130.4359-7-leo.yan@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
Peter Zijlstra 2020-07-16 13:11:29 +08:00 committed by Will Deacon
parent 6c0246a458
commit c8f9eb0d6e

View File

@@ -1177,6 +1177,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
+	userpg->cap_user_time_short = 0;
 
 	do {
 		rd = sched_clock_read_begin(&seq);
@@ -1187,13 +1188,13 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->time_mult = rd->mult;
 	userpg->time_shift = rd->shift;
 	userpg->time_zero = rd->epoch_ns;
+	userpg->time_cycles = rd->epoch_cyc;
+	userpg->time_mask = rd->sched_clock_mask;
 
 	/*
-	 * This isn't strictly correct, the ARM64 counter can be
-	 * 'short' and then we get funnies when it wraps. The correct
-	 * thing would be to extend the perf ABI with a cycle and mask
-	 * value, but because wrapping on ARM64 is very rare in
-	 * practise this 'works'.
+	 * Subtract the cycle base, such that software that
+	 * doesn't know about cap_user_time_short still 'works'
+	 * assuming no wraps.
 	 */
 	ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
 	userpg->time_zero -= ns;
@@ -1219,4 +1220,5 @@ void arch_perf_update_userpage(struct perf_event *event,
 	 */
 	userpg->cap_user_time = 1;
 	userpg->cap_user_time_zero = 1;
+	userpg->cap_user_time_short = 1;
 }