perf: Fix interrupt handler timing harness
This patch fixes a serious bug in:
14c63f17b1
perf: Drop sample rate when sampling is too slow
There was a misunderstanding of the do_div() macro's API: it
returns the remainder of the division, which is not what the
caller expected, and that led to the interrupt latency watchdog
being disabled.

This patch also removes a duplicate assignment in
perf_sample_event_took().
Signed-off-by: Stephane Eranian <eranian@google.com>
Cc: peterz@infradead.org
Cc: dave.hansen@linux.intel.com
Cc: ak@linux.intel.com
Cc: jolsa@redhat.com
Link: http://lkml.kernel.org/r/20130704223010.GA30625@quad
Signed-off-by: Ingo Molnar <mingo@kernel.org>
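
For context, do_div(n, base) divides the 64-bit value n by base in place
and evaluates to the 32-bit remainder, not the quotient. The sketch below
is illustrative only (the helper name and parameters are not from the
patch); it contrasts the broken pattern with the corrected one:

#include <linux/types.h>
#include <asm/div64.h>

/* Illustrative helper, not part of the patch: compute
 * period_ns * percent / 100 the way update_perf_cpu_limits() does.
 */
static u64 scale_period(u64 period_ns, u32 percent)
{
	u64 tmp = period_ns;

	tmp *= percent;

	/* Broken: do_div() evaluates to the remainder, so this
	 * discards the quotient and leaves a value in 0..99:
	 *
	 *	tmp = do_div(tmp, 100);
	 */

	/* Correct: do_div() stores the quotient back into tmp;
	 * the returned remainder can simply be ignored here.
	 */
	do_div(tmp, 100);

	return tmp;
}

With the broken form, perf_sample_allowed_ns could only ever hold a value
between 0 and 99, which is why the interrupt latency watchdog ended up
effectively disabled.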
parent 100ac53315
commit e5302920da
@@ -182,7 +182,7 @@ void update_perf_cpu_limits(void)
 	u64 tmp = perf_sample_period_ns;
 
 	tmp *= sysctl_perf_cpu_time_max_percent;
-	tmp = do_div(tmp, 100);
+	do_div(tmp, 100);
 	atomic_set(&perf_sample_allowed_ns, tmp);
 }
 
@@ -232,7 +232,7 @@ DEFINE_PER_CPU(u64, running_sample_length);
 void perf_sample_event_took(u64 sample_len_ns)
 {
 	u64 avg_local_sample_len;
-	u64 local_samples_len = __get_cpu_var(running_sample_length);
+	u64 local_samples_len;
 
 	if (atomic_read(&perf_sample_allowed_ns) == 0)
 		return;
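
The second hunk only drops the initializer at the declaration. Presumably
(an assumption, since the rest of the function is not shown in this diff)
local_samples_len is loaded from the per-CPU variable again after the
early-return check, roughly along these lines:

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (atomic_read(&perf_sample_allowed_ns) == 0)
		return;

	/* Assumed continuation, not shown in the hunk above: the per-CPU
	 * running length is read here anyway, so initializing it at the
	 * declaration was redundant work done even on the early return.
	 */
	local_samples_len = __get_cpu_var(running_sample_length);
	...
}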