sched, timers: move calc_load() to scheduler
Dimitri Sivanich noticed that xtime_lock is held write locked across calc_load() which iterates over all online CPUs. That can cause long latencies for xtime_lock readers on large SMP systems. The load average calculation is a rough estimate anyway, so there is no real need to protect the readers against the update. It's not a problem when the avenrun array is updated while a reader copies the values. Instead of iterating over all online CPUs, let the scheduler_tick code update the number of active tasks shortly before the avenrun update happens. The avenrun update itself is handled by the CPU which calls do_timer(). [ Impact: reduce xtime_lock write locked section ] Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Peter Zijlstra <peterz@infradead.org>
This commit is contained in:
@@ -1122,47 +1122,6 @@ void update_process_times(int user_tick)
|
||||
run_posix_cpu_timers(p);
|
||||
}
|
||||
|
||||
/*
|
||||
* Nr of active tasks - counted in fixed-point numbers
|
||||
*/
|
||||
static unsigned long count_active_tasks(void)
|
||||
{
|
||||
return nr_active() * FIXED_1;
|
||||
}
|
||||
|
||||
/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seems to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);
|
||||
|
||||
/*
|
||||
* calc_load - given tick count, update the avenrun load estimates.
|
||||
* This is called while holding a write_lock on xtime_lock.
|
||||
*/
|
||||
static inline void calc_load(unsigned long ticks)
|
||||
{
|
||||
unsigned long active_tasks; /* fixed-point */
|
||||
static int count = LOAD_FREQ;
|
||||
|
||||
count -= ticks;
|
||||
if (unlikely(count < 0)) {
|
||||
active_tasks = count_active_tasks();
|
||||
do {
|
||||
CALC_LOAD(avenrun[0], EXP_1, active_tasks);
|
||||
CALC_LOAD(avenrun[1], EXP_5, active_tasks);
|
||||
CALC_LOAD(avenrun[2], EXP_15, active_tasks);
|
||||
count += LOAD_FREQ;
|
||||
} while (count < 0);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This function runs timers and the timer-tq in bottom half context.
|
||||
*/
|
||||
@@ -1186,16 +1145,6 @@ void run_local_timers(void)
|
||||
softlockup_tick();
|
||||
}
|
||||
|
||||
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 *
 * Advances the wall clock, then feeds the elapsed tick count into
 * the load-average machinery.
 */
static inline void update_times(unsigned long ticks)
{
	update_wall_time();
	calc_load(ticks);
}
|
||||
|
||||
/*
|
||||
* The 64-bit jiffies value is not atomic - you MUST NOT read it
|
||||
* without sampling the sequence number in xtime_lock.
|
||||
@@ -1205,7 +1154,8 @@ static inline void update_times(unsigned long ticks)
|
||||
void do_timer(unsigned long ticks)
|
||||
{
|
||||
jiffies_64 += ticks;
|
||||
update_times(ticks);
|
||||
update_wall_time();
|
||||
calc_global_load();
|
||||
}
|
||||
|
||||
#ifdef __ARCH_WANT_SYS_ALARM
|
||||
|
||||
Reference in New Issue
Block a user