Mirror of https://github.com/torvalds/linux.git, synced 2024-12-28 13:51:44 +00:00
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The biggest change in this cycle is the rewrite of the main SMP load
balancing metric: the CPU load/utilization. The main goal was to make
the metric more precise and more representative - see the changelog of
this commit for the gory details:
9d89c257df ("sched/fair: Rewrite runnable load and utilization average tracking")
It is done in a way that significantly reduces complexity of the code:
5 files changed, 249 insertions(+), 494 deletions(-)
and the performance testing results are encouraging. Nevertheless we
need to keep an eye on potential regressions, since this potentially
affects every SMP workload in existence.
This work comes from Yuyang Du.
Other changes:
- SCHED_DL updates. (Andrea Parri)
- Simplify architecture callbacks by removing finish_arch_switch().
(Peter Zijlstra et al)
- cputime accounting: guarantee stime + utime == rtime. (Peter
Zijlstra)
- optimize idle CPU wakeups some more - inspired by Facebook server
loads. (Mike Galbraith)
- stop_machine fixes and updates. (Oleg Nesterov)
- Introduce the 'trace_sched_waking' tracepoint. (Peter Zijlstra)
- sched/numa tweaks. (Srikar Dronamraju)
- misc fixes and small cleanups"
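The load/utilization metric described in the pull request above is, at its core, a decayed sum over recent scheduling periods ("an infinite geometric series"). The following is a toy, self-contained user-space sketch of that idea only — the LOAD_SCALE constant, the decay factor and the structure are illustrative assumptions, not the kernel's PELT implementation (see the referenced commit for the real code).

/* Toy model: a geometric-series average of recent "runnable" periods. */
#include <stdio.h>

#define LOAD_SCALE 1024			/* full contribution for a busy period */

/* avg = contribution_of_this_period + y * avg_prev, with y < 1 so the
 * series stays bounded; here y is roughly 0.5^(1/32).                  */
static unsigned long decay_avg(unsigned long avg, int was_running)
{
	unsigned long contrib = was_running ? LOAD_SCALE : 0;

	return contrib + (avg * 9785) / 10000;
}

int main(void)
{
	unsigned long avg = 0;
	int i;

	/* a 50% duty-cycle task converges to about half the series' bound,
	 * mirroring the notion of "utilization" of a half-busy entity      */
	for (i = 0; i < 200; i++)
		avg = decay_avg(avg, i & 1);

	printf("converged avg: %lu\n", avg);
	return 0;
}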
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (44 commits)
sched/deadline: Fix comment in enqueue_task_dl()
sched/deadline: Fix comment in push_dl_tasks()
sched: Change the sched_class::set_cpus_allowed() calling context
sched: Make sched_class::set_cpus_allowed() unconditional
sched: Fix a race between __kthread_bind() and sched_setaffinity()
sched: Ensure a task has a non-normalized vruntime when returning back to CFS
sched/numa: Fix NUMA_DIRECT topology identification
tile: Reorganize _switch_to()
sched, sparc32: Update scheduler comments in copy_thread()
sched: Remove finish_arch_switch()
sched, tile: Remove finish_arch_switch
sched, sh: Fold finish_arch_switch() into switch_to()
sched, score: Remove finish_arch_switch()
sched, avr32: Remove finish_arch_switch()
sched, MIPS: Get rid of finish_arch_switch()
sched, arm: Remove finish_arch_switch()
sched/fair: Clean up load average references
sched/fair: Provide runnable_load_avg back to cfs_rq
sched/fair: Remove task and group entity load when they are dead
sched/fair: Init cfs_rq's sched_entity load average
...
This commit is contained in: commit a1d8561172
@@ -10,7 +10,9 @@
  * CPU.
  */
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
-#define finish_arch_switch(prev)	dsb(ish)
+#define __complete_pending_tlbi()	dsb(ish)
+#else
+#define __complete_pending_tlbi()
 #endif

 /*
@@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info

 #define switch_to(prev,next,last) \
 do { \
+	__complete_pending_tlbi(); \
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
 } while (0)
@@ -15,11 +15,13 @@
  */
 #ifdef CONFIG_OWNERSHIP_TRACE
 #include <asm/ocd.h>
-#define finish_arch_switch(prev) \
+#define ocd_switch(prev, next) \
 	do { \
 		ocd_write(PID, prev->pid); \
-		ocd_write(PID, current->pid); \
+		ocd_write(PID, next->pid); \
 	} while(0)
+#else
+#define ocd_switch(prev, next)
 #endif

 /*
@@ -38,6 +40,7 @@ extern struct task_struct *__switch_to(struct task_struct *,
 					struct cpu_context *);
 #define switch_to(prev, next, last) \
 	do { \
+		ocd_switch(prev, next); \
 		last = __switch_to(prev, &prev->thread.cpu_context + 1, \
 				   &next->thread.cpu_context); \
 	} while (0)
@@ -83,45 +83,43 @@ do {	if (cpu_has_rw_llb) { \
 	} \
 } while (0)

+/*
+ * For newly created kernel threads switch_to() will return to
+ * ret_from_kernel_thread, newly created user threads to ret_from_fork.
+ * That is, everything following resume() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before resume().
+ */
 #define switch_to(prev, next, last) \
 do { \
 	u32 __c0_stat; \
 	s32 __fpsave = FP_SAVE_NONE; \
 	__mips_mt_fpaff_switch_to(prev); \
-	if (cpu_has_dsp) \
+	if (cpu_has_dsp) { \
 		__save_dsp(prev); \
-	if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) { \
-		if (cop2_lazy_restore) \
-			KSTK_STATUS(prev) &= ~ST0_CU2; \
-		__c0_stat = read_c0_status(); \
-		write_c0_status(__c0_stat | ST0_CU2); \
-		cop2_save(prev); \
-		write_c0_status(__c0_stat & ~ST0_CU2); \
+		__restore_dsp(next); \
 	} \
+	if (cop2_present) { \
+		set_c0_status(ST0_CU2); \
+		if ((KSTK_STATUS(prev) & ST0_CU2)) { \
+			if (cop2_lazy_restore) \
+				KSTK_STATUS(prev) &= ~ST0_CU2; \
+			cop2_save(prev); \
+		} \
+		if (KSTK_STATUS(next) & ST0_CU2 && \
+		    !cop2_lazy_restore) { \
+			cop2_restore(next); \
+		} \
+		clear_c0_status(ST0_CU2); \
+	} \
 	__clear_software_ll_bit(); \
 	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \
 		__fpsave = FP_SAVE_SCALAR; \
 	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \
 		__fpsave = FP_SAVE_VECTOR; \
+	if (cpu_has_userlocal) \
+		write_c0_userlocal(task_thread_info(next)->tp_value); \
+	__restore_watch(); \
+	disable_msa(); \
 	(last) = resume(prev, next, task_thread_info(next), __fpsave); \
 } while (0)

-#define finish_arch_switch(prev) \
-do { \
-	u32 __c0_stat; \
-	if (cop2_present && !cop2_lazy_restore && \
-	    (KSTK_STATUS(current) & ST0_CU2)) { \
-		__c0_stat = read_c0_status(); \
-		write_c0_status(__c0_stat | ST0_CU2); \
-		cop2_restore(current); \
-		write_c0_status(__c0_stat & ~ST0_CU2); \
-	} \
-	if (cpu_has_dsp) \
-		__restore_dsp(current); \
-	if (cpu_has_userlocal) \
-		write_c0_userlocal(current_thread_info()->tp_value); \
-	__restore_watch(); \
-	disable_msa(); \
-} while (0)
-
 #endif /* _ASM_SWITCH_TO_H */
@@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		vc->runner = vcpu;
 		if (n_ceded == vc->n_runnable) {
 			kvmppc_vcore_blocked(vc);
-		} else if (should_resched()) {
+		} else if (need_resched()) {
 			vc->vcore_state = VCORE_PREEMPT;
 			/* Let something else run */
 			cond_resched_lock(&vc->lock);
@@ -8,6 +8,4 @@ do { \
 	(last) = resume(prev, next, task_thread_info(next)); \
 } while (0)

-#define finish_arch_switch(prev)	do {} while (0)
-
 #endif /* _ASM_SCORE_SWITCH_TO_H */
@@ -78,6 +78,8 @@ do { \
 	\
 	if (is_dsp_enabled(prev)) \
 		__save_dsp(prev); \
+	if (is_dsp_enabled(next)) \
+		__restore_dsp(next); \
 	\
 	__ts1 = (u32 *)&prev->thread.sp; \
 	__ts2 = (u32 *)&prev->thread.pc; \
@@ -125,10 +127,4 @@ do { \
 	last = __last; \
 } while (0)

-#define finish_arch_switch(prev) \
-do { \
-	if (is_dsp_enabled(prev)) \
-		__restore_dsp(prev); \
-} while (0)
-
 #endif /* __ASM_SH_SWITCH_TO_32_H */
@@ -333,11 +333,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);

 	/*
-	 * A new process must start with interrupts closed in 2.5,
-	 * because this is how Mingo's scheduler works (see schedule_tail
-	 * and finish_arch_switch). If we do not do it, a timer interrupt hits
-	 * before we unlock, attempts to re-take the rq->lock, and then we die.
-	 * Thus, kpsr|=PSR_PIL.
+	 * A new process must start with interrupts disabled, see schedule_tail()
+	 * and finish_task_switch(). (If we do not do it and if a timer interrupt
+	 * hits before we unlock and attempts to take the rq->lock, we deadlock.)
+	 *
+	 * Thus, kpsr |= PSR_PIL.
 	 */
 	ti->ksp = (unsigned long) new_stack;
 	p->thread.kregs = childregs;
@@ -53,15 +53,13 @@ extern unsigned long get_switch_to_pc(void);
- * Kernel threads can check to see if they need to migrate their
- * stack whenever they return from a context switch; for user
- * threads, we defer until they are returning to user-space.
+ * We defer homecache migration until the runqueue lock is released.
  */
-#define finish_arch_switch(prev) do { \
-	if (unlikely((prev)->state == TASK_DEAD)) \
-		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \
-			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \
+#define finish_arch_post_lock_switch() do { \
 	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \
 		(current->pid << _SIM_CONTROL_OPERATOR_BITS)); \
 	if (current->mm == NULL && !kstack_hash && \
-	    current_thread_info()->homecache_cpu != smp_processor_id()) \
+	    current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
 		homecache_migrate_kthread(); \
 } while (0)
@@ -446,6 +446,11 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	hardwall_switch_tasks(prev, next);
 #endif

+	/* Notify the simulator of task exit. */
+	if (unlikely(prev->state == TASK_DEAD))
+		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
+			     (prev->pid << _SIM_CONTROL_OPERATOR_BITS));
+
 	/*
 	 * Switch kernel SP, PC, and callee-saved registers.
 	 * In the context of the new task, return the old task pointer
@@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
 {
-	return unlikely(!raw_cpu_read_4(__preempt_count));
+	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
 }

 #ifdef CONFIG_PREEMPT
@@ -123,6 +123,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
 	 * cpuidle mechanism enables interrupts and doing that with timekeeping
 	 * suspended is generally unsafe.
 	 */
+	stop_critical_timings();
 	drv->states[index].enter_freeze(dev, drv, index);
 	WARN_ON(!irqs_disabled());
 	/*
@@ -131,6 +132,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
 	 * critical sections, so tell RCU about that.
 	 */
 	RCU_NONIDLE(tick_unfreeze());
+	start_critical_timings();
 }

 /**
@@ -195,7 +197,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	trace_cpu_idle_rcuidle(index, dev->cpu);
 	time_start = ktime_get();

+	stop_critical_timings();
 	entered_state = target_state->enter(dev, drv, index);
+	start_critical_timings();

 	time_end = ktime_get();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
 asmlinkage __visible void xen_maybe_preempt_hcall(void)
 {
 	if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
-		     && should_resched())) {
+		     && need_resched())) {
 		/*
 		 * Clear flag as we may be rescheduled on a different
 		 * cpu.
@@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
 {
-	return unlikely(!preempt_count() && tif_need_resched());
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
 }

 #ifdef CONFIG_PREEMPT
@@ -32,6 +32,14 @@ extern struct fs_struct init_fs;
 #define INIT_CPUSET_SEQ(tsk)
 #endif

+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#define INIT_PREV_CPUTIME(x)	.prev_cputime = { \
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock), \
+},
+#else
+#define INIT_PREV_CPUTIME(x)
+#endif
+
 #define INIT_SIGNALS(sig) { \
 	.nr_threads	= 1, \
 	.thread_head	= LIST_HEAD_INIT(init_task.thread_node), \
@@ -46,6 +54,7 @@ extern struct fs_struct init_fs;
 		.cputime_atomic	= INIT_CPUTIME_ATOMIC, \
 		.running	= 0, \
 	}, \
+	INIT_PREV_CPUTIME(sig) \
 	.cred_guard_mutex = \
 		 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
 }
@@ -246,6 +255,7 @@ extern struct task_group root_task_group;
 	INIT_TASK_RCU_TASKS(tsk) \
 	INIT_CPUSET_SEQ(tsk) \
 	INIT_RT_MUTEXES(tsk) \
+	INIT_PREV_CPUTIME(tsk) \
 	INIT_VTIME(tsk) \
 	INIT_NUMA_BALANCING(tsk) \
 	INIT_KASAN(tsk) \
@@ -38,6 +38,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 })

 void kthread_bind(struct task_struct *k, unsigned int cpu);
+void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
 bool kthread_should_stop(void);
 bool kthread_should_park(void);
@@ -84,12 +84,20 @@
  */
 #define in_nmi()	(preempt_count() & NMI_MASK)

+/*
+ * The preempt_count offset after preempt_disable();
+ */
 #if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_DISABLE_OFFSET	1
+# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
 #else
 # define PREEMPT_DISABLE_OFFSET	0
 #endif

+/*
+ * The preempt_count offset after spin_lock()
+ */
+#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
+
 /*
  * The preempt_count offset needed for things like:
  *
@@ -103,7 +111,7 @@
  *
  * Work as expected.
  */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)

 /*
  * Are we running in atomic context? WARNING: this macro cannot
@@ -124,7 +132,8 @@
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#define preempt_count_dec_and_test() \
+	({ preempt_count_sub(1); should_resched(0); })
 #else
 #define preempt_count_add(val)	__preempt_count_add(val)
 #define preempt_count_sub(val)	__preempt_count_sub(val)
@@ -184,7 +193,7 @@ do { \
 #define preempt_check_resched() \
 do { \
-	if (should_resched()) \
+	if (should_resched(0)) \
 		__preempt_schedule(); \
 } while (0)
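The new should_resched(preempt_offset) contract shown above means "reschedule only when the preempt count is exactly the offset the caller expects to be holding" (0 for cond_resched(), PREEMPT_LOCK_OFFSET for cond_resched_lock(), and so on). The following is a minimal user-space model of that idea; the global variables and values are stand-ins for illustration, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

static int preempt_count;		/* stand-in for the per-CPU preempt counter */
static bool need_resched = true;	/* stand-in for TIF_NEED_RESCHED */

static bool should_resched(int preempt_offset)
{
	return need_resched && preempt_count == preempt_offset;
}

int main(void)
{
	preempt_count = 1;	/* e.g. one spinlock held */

	/* a cond_resched()-style caller passes 0: refused while a lock is held */
	printf("offset 0: %d\n", should_resched(0));
	/* a cond_resched_lock()-style caller passes the lock's own offset: allowed */
	printf("offset 1: %d\n", should_resched(1));
	return 0;
}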
@@ -530,39 +530,49 @@ struct cpu_itimer {
 /**
- * struct cputime - snaphsot of system and user cputime
+ * struct prev_cputime - snaphsot of system and user cputime
  * @utime: time spent in user mode
  * @stime: time spent in system mode
+ * @lock: protects the above two fields
  *
- * Gathers a generic snapshot of user and system time.
+ * Stores previous user/system time values such that we can guarantee
+ * monotonicity.
  */
-struct cputime {
+struct prev_cputime {
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	cputime_t utime;
 	cputime_t stime;
+	raw_spinlock_t lock;
+#endif
 };

+static inline void prev_cputime_init(struct prev_cputime *prev)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+	prev->utime = prev->stime = 0;
+	raw_spin_lock_init(&prev->lock);
+#endif
+}
+
 /**
  * struct task_cputime - collected CPU time counts
  * @utime:		time spent in user mode, in &cputime_t units
  * @stime:		time spent in kernel mode, in &cputime_t units
  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
  *
- * This is an extension of struct cputime that includes the total runtime
- * spent by the task from the scheduler point of view.
- *
- * As a result, this structure groups together three kinds of CPU time
- * that are tracked for threads and thread groups. Most things considering
- * CPU time want to group these counts together and treat all three
- * of them in parallel.
+ * This structure groups together three kinds of CPU time that are tracked for
+ * threads and thread groups. Most things considering CPU time want to group
+ * these counts together and treat all three of them in parallel.
  */
 struct task_cputime {
 	cputime_t utime;
 	cputime_t stime;
 	unsigned long long sum_exec_runtime;
 };

 /* Alternate field names when used to cache expirations. */
-#define prof_exp	stime
 #define virt_exp	utime
+#define prof_exp	stime
 #define sched_exp	sum_exec_runtime
@@ -715,9 +725,7 @@ struct signal_struct {
 	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	struct cputime prev_cputime;
-#endif
+	struct prev_cputime prev_cputime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
@@ -1167,29 +1175,24 @@ struct load_weight {
 	u32 inv_weight;
 };

+/*
+ * The load_avg/util_avg accumulates an infinite geometric series.
+ * 1) load_avg factors the amount of time that a sched_entity is
+ * runnable on a rq into its weight. For cfs_rq, it is the aggregated
+ * such weights of all runnable and blocked sched_entities.
+ * 2) util_avg factors frequency scaling into the amount of time
+ * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
+ * For cfs_rq, it is the aggregated such times of all runnable and
+ * blocked sched_entities.
+ * The 64 bit load_sum can:
+ * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
+ * the highest weight (=88761) always runnable, we should not overflow
+ * 2) for entity, support any load.weight always runnable
+ */
 struct sched_avg {
-	u64 last_runnable_update;
-	s64 decay_count;
-	/*
-	 * utilization_avg_contrib describes the amount of time that a
-	 * sched_entity is running on a CPU. It is based on running_avg_sum
-	 * and is scaled in the range [0..SCHED_LOAD_SCALE].
-	 * load_avg_contrib described the amount of time that a sched_entity
-	 * is runnable on a rq. It is based on both runnable_avg_sum and the
-	 * weight of the task.
-	 */
-	unsigned long load_avg_contrib, utilization_avg_contrib;
-	/*
-	 * These sums represent an infinite geometric series and so are bound
-	 * above by 1024/(1-y). Thus we only need a u32 to store them for all
-	 * choices of y < 1-2^(-32)*1024.
-	 * running_avg_sum reflects the time that the sched_entity is
-	 * effectively running on the CPU.
-	 * runnable_avg_sum represents the amount of time a sched_entity is on
-	 * a runqueue which includes the running time that is monitored by
-	 * running_avg_sum.
-	 */
-	u32 runnable_avg_sum, avg_period, running_avg_sum;
+	u64 last_update_time, load_sum;
+	u32 util_sum, period_contrib;
+	unsigned long load_avg, util_avg;
 };
@@ -1255,7 +1258,7 @@ struct sched_entity {
 #ifdef CONFIG_SMP
-	/* Per-entity load-tracking */
+	/* Per entity load average tracking */
 	struct sched_avg	avg;
 #endif
 };
@@ -1351,9 +1354,9 @@ struct task_struct {
 	struct llist_node wake_entry;
 	int on_cpu;
-	struct task_struct *last_wakee;
-	unsigned long wakee_flips;
+	unsigned int wakee_flips;
 	unsigned long wakee_flip_decay_ts;
+	struct task_struct *last_wakee;

 	int wake_cpu;
 #endif
@@ -1481,9 +1484,7 @@ struct task_struct {
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	struct cputime prev_cputime;
-#endif
+	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqlock_t vtime_seqlock;
 	unsigned long long vtime_snap;
@@ -2214,13 +2215,6 @@ static inline void calc_load_enter_idle(void) { }
 static inline void calc_load_exit_idle(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */

-#ifndef CONFIG_CPUMASK_OFFSTACK
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
-{
-	return set_cpus_allowed_ptr(p, &new_mask);
-}
-#endif
-
 /*
  * Do not use outside of architecture code which knows its limitations.
  *
@@ -2897,12 +2891,6 @@ extern int _cond_resched(void);
 extern int __cond_resched_lock(spinlock_t *lock);

-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET	0
-#endif
-
 #define cond_resched_lock(lock) ({				\
 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
 	__cond_resched_lock(lock);				\
@@ -112,25 +112,13 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
  *
  * This can be thought of as a very heavy write lock, equivalent to
  * grabbing every spinlock in the kernel. */
-int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);

-/**
- * __stop_machine: freeze the machine on all CPUs and run this function
- * @fn: the function to run
- * @data: the data ptr for the @fn
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
- *
- * Description: This is a special version of the above, which assumes cpus
- * won't come or go while it's being called. Used by hotplug cpu.
- */
-int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
-
-int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 				   const struct cpumask *cpus);

 #else	/* CONFIG_STOP_MACHINE && CONFIG_SMP */

-static inline int __stop_machine(int (*fn)(void *), void *data,
+static inline int stop_machine(cpu_stop_fn_t fn, void *data,
 				 const struct cpumask *cpus)
 {
 	unsigned long flags;
@@ -141,16 +129,10 @@ static inline int __stop_machine(int (*fn)(void *), void *data,
 	return ret;
 }

-static inline int stop_machine(int (*fn)(void *), void *data,
-			       const struct cpumask *cpus)
-{
-	return __stop_machine(fn, data, cpus);
-}
-
-static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 						 const struct cpumask *cpus)
 {
-	return __stop_machine(fn, data, cpus);
+	return stop_machine(fn, data, cpus);
 }

 #endif	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
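For reference, a caller against the new prototypes declared above now passes a cpu_stop_fn_t (int (*)(void *)) to stop_machine() and stop_machine_from_inactive_cpu() alike. The sketch below is a kernel-style fragment for illustration only, not a complete buildable module; flip_feature() and example() are hypothetical names.

#include <linux/stop_machine.h>

static int flip_feature(void *data)
{
	/* runs while every other online CPU spins with interrupts disabled */
	*(int *)data = 1;
	return 0;
}

static int __maybe_unused example(void)
{
	int on = 0;

	/* NULL cpumask: run the callback on any one online CPU */
	return stop_machine(flip_feature, &on, NULL);
}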
@@ -55,9 +55,9 @@ TRACE_EVENT(sched_kthread_stop_ret,
  */
 DECLARE_EVENT_CLASS(sched_wakeup_template,

-	TP_PROTO(struct task_struct *p, int success),
+	TP_PROTO(struct task_struct *p),

-	TP_ARGS(__perf_task(p), success),
+	TP_ARGS(__perf_task(p)),

 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -71,25 +71,37 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 		__entry->pid		= p->pid;
 		__entry->prio		= p->prio;
-		__entry->success	= success;
+		__entry->success	= 1; /* rudiment, kill when possible */
 		__entry->target_cpu	= task_cpu(p);
 	),

-	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
+	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
 		  __entry->comm, __entry->pid, __entry->prio,
-		  __entry->success, __entry->target_cpu)
+		  __entry->target_cpu)
 );

+/*
+ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
+ * called from the waking context.
+ */
+DEFINE_EVENT(sched_wakeup_template, sched_waking,
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
+
+/*
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
+ * It it not always called from the waking context.
+ */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
-	     TP_PROTO(struct task_struct *p, int success),
-	     TP_ARGS(p, success));
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));

 /*
  * Tracepoint for waking up a new task:
  */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
-	     TP_PROTO(struct task_struct *p, int success),
-	     TP_ARGS(p, success));
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));

 #ifdef CREATE_TRACE_POINTS
 static inline long __trace_sched_switch_state(struct task_struct *p)
@@ -402,7 +402,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
-	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone. Can't complain. */
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
@@ -1072,6 +1072,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 	rcu_assign_pointer(tsk->sighand, sig);
 	if (!sig)
 		return -ENOMEM;
+
 	atomic_set(&sig->count, 1);
 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
 	return 0;
@@ -1133,6 +1134,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	init_sigpending(&sig->shared_pending);
 	INIT_LIST_HEAD(&sig->posix_timers);
 	seqlock_init(&sig->stats_lock);
+	prev_cputime_init(&sig->prev_cputime);

 	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	sig->real_timer.function = it_real_fn;
@@ -1340,9 +1342,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->utime = p->stime = p->gtime = 0;
 	p->utimescaled = p->stimescaled = 0;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	p->prev_cputime.utime = p->prev_cputime.stime = 0;
-#endif
+	prev_cputime_init(&p->prev_cputime);

 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqlock_init(&p->vtime_seqlock);
 	p->vtime_snap = 0;
@@ -327,16 +327,30 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);

-static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
 {
-	/* Must have done schedule() in kthread() before we set_task_cpu */
+	unsigned long flags;
+
 	if (!wait_task_inactive(p, state)) {
 		WARN_ON(1);
 		return;
 	}
+
 	/* It's safe because the task is inactive. */
-	do_set_cpus_allowed(p, cpumask_of(cpu));
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	do_set_cpus_allowed(p, mask);
 	p->flags |= PF_NO_SETAFFINITY;
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+{
+	__kthread_bind_mask(p, cpumask_of(cpu), state);
+}
+
+void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
+{
+	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 }

 /**
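A usage sketch for the new kthread_bind_mask() helper, modelled on how workqueue.c uses it later in this series: bind a freshly created, not yet woken kthread to a cpumask (which also sets PF_NO_SETAFFINITY under p->pi_lock) before waking it. worker_fn() and start_pinned_worker() are hypothetical names; this is an illustrative kernel-style fragment, not a complete module.

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/err.h>

static int worker_fn(void *data)
{
	while (!kthread_should_stop())
		schedule();
	return 0;
}

static struct task_struct *start_pinned_worker(const struct cpumask *mask)
{
	struct task_struct *p;

	p = kthread_create(worker_fn, NULL, "pinned_worker");
	if (IS_ERR(p))
		return p;

	kthread_bind_mask(p, mask);	/* must happen before wake_up_process() */
	wake_up_process(p);
	return p;
}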
@@ -1151,15 +1151,45 @@ static int migration_cpu_stop(void *data)
 	return 0;
 }

-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+/*
+ * sched_class::set_cpus_allowed must do the below, but is not required to
+ * actually call this function.
+ */
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 {
-	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-
 	cpumask_copy(&p->cpus_allowed, new_mask);
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }

+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	struct rq *rq = task_rq(p);
+	bool queued, running;
+
+	lockdep_assert_held(&p->pi_lock);
+
+	queued = task_on_rq_queued(p);
+	running = task_current(rq, p);
+
+	if (queued) {
+		/*
+		 * Because __kthread_bind() calls this on blocked tasks without
+		 * holding rq->lock.
+		 */
+		lockdep_assert_held(&rq->lock);
+		dequeue_task(rq, p, 0);
+	}
+	if (running)
+		put_prev_task(rq, p);
+
+	p->sched_class->set_cpus_allowed(p, new_mask);
+
+	if (running)
+		p->sched_class->set_curr_task(rq);
+	if (queued)
+		enqueue_task(rq, p, 0);
+}
+
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
@@ -1169,7 +1199,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+				  const struct cpumask *new_mask, bool check)
 {
 	unsigned long flags;
 	struct rq *rq;
@@ -1178,6 +1209,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	rq = task_rq_lock(p, &flags);

+	/*
+	 * Must re-check here, to close a race against __kthread_bind(),
+	 * sched_setaffinity() is not guaranteed to observe the flag.
+	 */
+	if (check && (p->flags & PF_NO_SETAFFINITY)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (cpumask_equal(&p->cpus_allowed, new_mask))
 		goto out;
@@ -1214,6 +1254,11 @@ out:
 	return ret;
 }

+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+	return __set_cpus_allowed_ptr(p, new_mask, false);
+}
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
@@ -1595,6 +1640,15 @@ static void update_avg(u64 *avg, u64 sample)
 	s64 diff = sample - *avg;
 	*avg += diff >> 3;
 }
+
+#else
+
+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+					 const struct cpumask *new_mask, bool check)
+{
+	return set_cpus_allowed_ptr(p, new_mask);
+}
+
 #endif /* CONFIG_SMP */
@@ -1654,9 +1708,9 @@ static void
 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 {
 	check_preempt_curr(rq, p, wake_flags);
-	trace_sched_wakeup(p, true);
-
 	p->state = TASK_RUNNING;
+	trace_sched_wakeup(p);
+
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_woken) {
@@ -1874,6 +1928,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	if (!(p->state & state))
 		goto out;

+	trace_sched_waking(p);
+
 	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
@@ -1949,6 +2005,8 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!(p->state & TASK_NORMAL))
 		goto out;

+	trace_sched_waking(p);
+
 	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
@@ -2016,9 +2074,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
-#ifdef CONFIG_SMP
-	p->se.avg.decay_count		= 0;
-#endif
 	INIT_LIST_HEAD(&p->se.group_node);
@@ -2303,11 +2358,11 @@ void wake_up_new_task(struct task_struct *p)
 	/* Initialize new task's runnable average */
-	init_task_runnable_average(p);
+	init_entity_runnable_average(&p->se);
 	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
-	trace_sched_wakeup_new(p, true);
+	trace_sched_wakeup_new(p);
 	check_preempt_curr(rq, p, WF_FORK);
@@ -2469,7 +2524,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	prev_state = prev->state;
 	vtime_task_switch(prev);
-	finish_arch_switch(prev);
 	perf_event_task_sched_in(prev, current);
 	finish_lock_switch(rq, prev);
 	finish_arch_post_lock_switch();
@@ -4340,7 +4394,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 again:
-	retval = set_cpus_allowed_ptr(p, new_mask);
+	retval = __set_cpus_allowed_ptr(p, new_mask, true);

 	if (!retval) {
 		cpuset_cpus_allowed(p, cpus_allowed);
@@ -4492,7 +4546,7 @@ SYSCALL_DEFINE0(sched_yield)
 int __sched _cond_resched(void)
 {
-	if (should_resched()) {
+	if (should_resched(0)) {
 		preempt_schedule_common();
 		return 1;
 	}
@@ -4510,7 +4564,7 @@ EXPORT_SYMBOL(_cond_resched);
 int __cond_resched_lock(spinlock_t *lock)
 {
-	int resched = should_resched();
+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
 	int ret = 0;

 	lockdep_assert_held(lock);
@@ -4532,7 +4586,7 @@ int __sched __cond_resched_softirq(void)
 	BUG_ON(!in_softirq());

-	if (should_resched()) {
+	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
 		local_bh_enable();
 		preempt_schedule_common();
 		local_bh_disable();
@@ -4865,7 +4919,8 @@ void init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;

-	raw_spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&idle->pi_lock, flags);
+	raw_spin_lock(&rq->lock);

 	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
@@ -4891,7 +4946,8 @@ void init_idle(struct task_struct *idle, int cpu)
 #if defined(CONFIG_SMP)
 	idle->on_cpu = 1;
 #endif
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);

 	/* Set the preempt count _outside_ the spinlocks! */
 	init_idle_preempt_count(idle, cpu);
@@ -5311,8 +5367,7 @@ static void register_sched_domain_sysctl(void)
 /* may be called multiple times per register */
 static void unregister_sched_domain_sysctl(void)
 {
-	if (sd_sysctl_header)
-		unregister_sysctl_table(sd_sysctl_header);
+	unregister_sysctl_table(sd_sysctl_header);
 	sd_sysctl_header = NULL;
 	if (sd_ctl_dir[0].child)
 		sd_free_ctl_entry(&sd_ctl_dir[0].child);
@@ -6445,8 +6500,10 @@ static void init_numa_topology_type(void)
 	n = sched_max_numa_distance;

-	if (n <= 1)
+	if (sched_domains_numa_levels <= 1) {
 		sched_numa_topology_type = NUMA_DIRECT;
+		return;
+	}

 	for_each_online_node(a) {
 		for_each_online_node(b) {
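With set_cpus_allowed_common() now provided by the core (above), a scheduling class that needs no special affinity handling simply points its callback at the common helper, as the RT, idle and stop classes do further down in this series. A minimal fragment of that pattern, with example_sched_class as a purely hypothetical name:

const struct sched_class example_sched_class = {
	/* ... other callbacks ... */
#ifdef CONFIG_SMP
	.set_cpus_allowed	= set_cpus_allowed_common,	/* default behaviour */
#endif
};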
@@ -555,48 +555,43 @@ drop_precision:
 }

 /*
- * Atomically advance counter to the new value. Interrupts, vcpu
- * scheduling, and scaling inaccuracies can cause cputime_advance
- * to be occasionally called with a new value smaller than counter.
- * Let's enforce atomicity.
- *
- * Normally a caller will only go through this loop once, or not
- * at all in case a previous caller updated counter the same jiffy.
- */
-static void cputime_advance(cputime_t *counter, cputime_t new)
-{
-	cputime_t old;
-
-	while (new > (old = READ_ONCE(*counter)))
-		cmpxchg_cputime(counter, old, new);
-}
-
-/*
- * Adjust tick based cputime random precision against scheduler
- * runtime accounting.
+ * Adjust tick based cputime random precision against scheduler runtime
+ * accounting.
+ *
+ * Tick based cputime accounting depend on random scheduling timeslices of a
+ * task to be interrupted or not by the timer. Depending on these
+ * circumstances, the number of these interrupts may be over or
+ * under-optimistic, matching the real user and system cputime with a variable
+ * precision.
+ *
+ * Fix this by scaling these tick based values against the total runtime
+ * accounted by the CFS scheduler.
+ *
+ * This code provides the following guarantees:
+ *
+ *   stime + utime == rtime
+ *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
+ *
+ * Assuming that rtime_i+1 >= rtime_i.
  */
 static void cputime_adjust(struct task_cputime *curr,
-			   struct cputime *prev,
+			   struct prev_cputime *prev,
 			   cputime_t *ut, cputime_t *st)
 {
 	cputime_t rtime, stime, utime;
+	unsigned long flags;

-	/*
-	 * Tick based cputime accounting depend on random scheduling
-	 * timeslices of a task to be interrupted or not by the timer.
-	 * Depending on these circumstances, the number of these interrupts
-	 * may be over or under-optimistic, matching the real user and system
-	 * cputime with a variable precision.
-	 *
-	 * Fix this by scaling these tick based values against the total
-	 * runtime accounted by the CFS scheduler.
-	 */
+	/* Serialize concurrent callers such that we can honour our guarantees */
+	raw_spin_lock_irqsave(&prev->lock, flags);
 	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

 	/*
 	 * Update userspace visible utime/stime values only if actual execution
 	 * time is bigger than already exported. Note that can happen, that we
 	 * provided bigger values due to scaling inaccuracy on big numbers.
+	 *
+	 * This is possible under two circumstances:
+	 *  - rtime isn't monotonic after all (a bug);
+	 *  - we got reordered by the lock.
+	 *
+	 * In both cases this acts as a filter such that the rest of the code
+	 * can assume it is monotonic regardless of anything else.
 	 */
 	if (prev->stime + prev->utime >= rtime)
 		goto out;
@@ -606,22 +601,46 @@ static void cputime_adjust(struct task_cputime *curr,
 	if (utime == 0) {
 		stime = rtime;
-	} else if (stime == 0) {
-		utime = rtime;
-	} else {
-		cputime_t total = stime + utime;
-
-		stime = scale_stime((__force u64)stime,
-				    (__force u64)rtime, (__force u64)total);
-		utime = rtime - stime;
+		goto update;
 	}

-	cputime_advance(&prev->stime, stime);
-	cputime_advance(&prev->utime, utime);
+	if (stime == 0) {
+		utime = rtime;
+		goto update;
+	}
+
+	stime = scale_stime((__force u64)stime, (__force u64)rtime,
+			    (__force u64)(stime + utime));
+
+	/*
+	 * Make sure stime doesn't go backwards; this preserves monotonicity
+	 * for utime because rtime is monotonic.
+	 *
+	 *  utime_i+1 = rtime_i+1 - stime_i
+	 *            = rtime_i+1 - (rtime_i - utime_i)
+	 *            = (rtime_i+1 - rtime_i) + utime_i
+	 *            >= utime_i
+	 */
+	if (stime < prev->stime)
+		stime = prev->stime;
+	utime = rtime - stime;
+
+	/*
+	 * Make sure utime doesn't go backwards; this still preserves
+	 * monotonicity for stime, analogous argument to above.
+	 */
+	if (utime < prev->utime) {
+		utime = prev->utime;
+		stime = rtime - utime;
+	}

+update:
+	prev->stime = stime;
+	prev->utime = utime;
 out:
 	*ut = prev->utime;
 	*st = prev->stime;
+	raw_spin_unlock_irqrestore(&prev->lock, flags);
 }

 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
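A small, self-contained user-space model of the clamping performed above; it mirrors the logic (scale stime against the real runtime, then clamp both values against what was previously reported) rather than the kernel code itself, and demonstrates the stime + utime == rtime and monotonicity guarantees. Names and types are simplified assumptions.

#include <assert.h>
#include <stdio.h>

struct prev { unsigned long long utime, stime; };

static void adjust(struct prev *p, unsigned long long stime,
		   unsigned long long utime, unsigned long long rtime)
{
	/* filter: never report less than what was already reported */
	if (p->stime + p->utime >= rtime)
		return;

	if (utime == 0) {
		stime = rtime;
	} else if (stime == 0) {
		utime = rtime;
	} else {
		/* scale tick-based stime against the real runtime */
		stime = rtime * stime / (stime + utime);
		if (stime < p->stime)		/* stime must not go backwards */
			stime = p->stime;
		utime = rtime - stime;
		if (utime < p->utime) {		/* neither must utime */
			utime = p->utime;
			stime = rtime - utime;
		}
	}
	p->stime = stime;
	p->utime = utime;
}

int main(void)
{
	struct prev p = { 0, 0 };
	unsigned long long rtime, old_u = 0, old_s = 0;

	/* ticks report a 50/50 split while runtime grows monotonically */
	for (rtime = 100; rtime <= 1000; rtime += 100) {
		adjust(&p, rtime / 2, rtime / 2, rtime);
		assert(p.stime + p.utime == rtime);		/* stime + utime == rtime */
		assert(p.stime >= old_s && p.utime >= old_u);	/* monotonic */
		old_s = p.stime;
		old_u = p.utime;
	}
	printf("utime=%llu stime=%llu\n", p.utime, p.stime);
	return 0;
}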
@@ -953,7 +953,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 	/*
 	 * Use the scheduling parameters of the top pi-waiter
-	 * task if we have one and its (relative) deadline is
+	 * task if we have one and its (absolute) deadline is
 	 * smaller than our one... OTW we keep our runtime and
 	 * deadline.
 	 */
@@ -1563,7 +1563,7 @@ out:
 static void push_dl_tasks(struct rq *rq)
 {
-	/* Terminates as it moves a -deadline task */
+	/* push_dl_task() will return true if it moved a -deadline task */
 	while (push_dl_task(rq))
 		;
 }
@@ -1657,7 +1657,6 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    has_pushable_dl_tasks(rq) &&
 	    p->nr_cpus_allowed > 1 &&
 	    dl_task(rq->curr) &&
 	    (rq->curr->nr_cpus_allowed < 2 ||
@@ -1669,9 +1668,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 static void set_cpus_allowed_dl(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
-	struct rq *rq;
 	struct root_domain *src_rd;
-	int weight;
+	struct rq *rq;

 	BUG_ON(!dl_task(p));
@@ -1697,37 +1695,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		raw_spin_unlock(&src_dl_b->lock);
 	}

-	/*
-	 * Update only if the task is actually running (i.e.,
-	 * it is on the rq AND it is not throttled).
-	 */
-	if (!on_dl_rq(&p->dl))
-		return;
-
-	weight = cpumask_weight(new_mask);
-
-	/*
-	 * Only update if the process changes its state from whether it
-	 * can migrate or not.
-	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
-		return;
-
-	/*
-	 * The process used to be able to migrate OR it can now migrate
-	 */
-	if (weight <= 1) {
-		if (!task_current(rq, p))
-			dequeue_pushable_dl_task(rq, p);
-		BUG_ON(!rq->dl.dl_nr_migratory);
-		rq->dl.dl_nr_migratory--;
-	} else {
-		if (!task_current(rq, p))
-			enqueue_pushable_dl_task(rq, p);
-		rq->dl.dl_nr_migratory++;
-	}
-
-	update_dl_migration(&rq->dl);
+	set_cpus_allowed_common(p, new_mask);
 }

 /* Assumes rq->lock is held */
@@ -68,13 +68,8 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 #define PN(F) \
 	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

-	if (!se) {
-		struct sched_avg *avg = &cpu_rq(cpu)->avg;
-		P(avg->runnable_avg_sum);
-		P(avg->avg_period);
+	if (!se)
 		return;
-	}

 	PN(se->exec_start);
 	PN(se->vruntime);
@@ -93,12 +88,8 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 	P(se->load.weight);
 #ifdef CONFIG_SMP
-	P(se->avg.runnable_avg_sum);
-	P(se->avg.running_avg_sum);
-	P(se->avg.avg_period);
-	P(se->avg.load_avg_contrib);
-	P(se->avg.utilization_avg_contrib);
-	P(se->avg.decay_count);
+	P(se->avg.load_avg);
+	P(se->avg.util_avg);
 #endif
 #undef PN
 #undef P
@@ -214,21 +205,21 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
-	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
+	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
+			cfs_rq->avg.load_avg);
+	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
 			cfs_rq->runnable_load_avg);
-	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
-			cfs_rq->blocked_load_avg);
-	SEQ_printf(m, "  .%-30s: %ld\n", "utilization_load_avg",
-			cfs_rq->utilization_load_avg);
+	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
+			cfs_rq->avg.util_avg);
+	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
+			atomic_long_read(&cfs_rq->removed_load_avg));
+	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
+			atomic_long_read(&cfs_rq->removed_util_avg));
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
-			cfs_rq->tg_load_contrib);
-	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
-			cfs_rq->tg_runnable_contrib);
+	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
+			cfs_rq->tg_load_avg_contrib);
 	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
 			atomic_long_read(&cfs_rq->tg->load_avg));
-	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
-			atomic_read(&cfs_rq->tg->runnable_avg));
 #endif
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -636,12 +627,11 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	P(se.load.weight);
 #ifdef CONFIG_SMP
-	P(se.avg.runnable_avg_sum);
-	P(se.avg.running_avg_sum);
-	P(se.avg.avg_period);
-	P(se.avg.load_avg_contrib);
-	P(se.avg.utilization_avg_contrib);
-	P(se.avg.decay_count);
+	P(se.avg.load_sum);
+	P(se.avg.util_sum);
+	P(se.avg.load_avg);
+	P(se.avg.util_avg);
+	P(se.avg.last_update_time);
 #endif
 	P(policy);
 	P(prio);
File diff suppressed because it is too large.
@@ -79,20 +79,12 @@ SCHED_FEAT(LB_MIN, false)
  * numa_balancing=
  */
 #ifdef CONFIG_NUMA_BALANCING
-SCHED_FEAT(NUMA,	false)

 /*
- * NUMA_FAVOUR_HIGHER will favor moving tasks towards nodes where a
- * higher number of hinting faults are recorded during active load
- * balancing.
+ * NUMA will favor moving tasks towards nodes where a higher number of
+ * hinting faults are recorded during active load balancing. It will
+ * resist moving tasks towards nodes where a lower number of hinting
+ * faults have been recorded.
  */
-SCHED_FEAT(NUMA_FAVOUR_HIGHER, true)
-
-/*
- * NUMA_RESIST_LOWER will resist moving tasks towards nodes where a
- * lower number of hinting faults have been recorded. As this has
- * the potential to prevent a task ever migrating to a new node
- * due to CPU overload it is disabled by default.
- */
-SCHED_FEAT(NUMA_RESIST_LOWER, false)
+SCHED_FEAT(NUMA,	true)
 #endif
@@ -83,10 +83,13 @@ void __weak arch_cpu_idle(void)
 void default_idle_call(void)
 {
-	if (current_clr_polling_and_test())
+	if (current_clr_polling_and_test()) {
 		local_irq_enable();
-	else
+	} else {
+		stop_critical_timings();
 		arch_cpu_idle();
+		start_critical_timings();
+	}
 }

 static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
@@ -140,12 +143,6 @@ static void cpuidle_idle_call(void)
 		return;
 	}

-	/*
-	 * During the idle period, stop measuring the disabled irqs
-	 * critical sections latencies
-	 */
-	stop_critical_timings();
-
 	/*
 	 * Tell the RCU framework we are entering an idle section,
 	 * so no more rcu read side critical sections and one more
@@ -198,7 +195,6 @@ exit_idle:
 		local_irq_enable();

 	rcu_idle_exit();
-	start_critical_timings();
 }

 DEFINE_PER_CPU(bool, cpu_dead_idle);
@@ -96,6 +96,7 @@ const struct sched_class idle_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
+	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif

 	.set_curr_task          = set_curr_task_idle,
@@ -2069,7 +2069,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    has_pushable_tasks(rq) &&
 	    p->nr_cpus_allowed > 1 &&
 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
 	    (rq->curr->nr_cpus_allowed < 2 ||
@@ -2077,45 +2076,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }

-static void set_cpus_allowed_rt(struct task_struct *p,
-				const struct cpumask *new_mask)
-{
-	struct rq *rq;
-	int weight;
-
-	BUG_ON(!rt_task(p));
-
-	if (!task_on_rq_queued(p))
-		return;
-
-	weight = cpumask_weight(new_mask);
-
-	/*
-	 * Only update if the process changes its state from whether it
-	 * can migrate or not.
-	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
-		return;
-
-	rq = task_rq(p);
-
-	/*
-	 * The process used to be able to migrate OR it can now migrate
-	 */
-	if (weight <= 1) {
-		if (!task_current(rq, p))
-			dequeue_pushable_task(rq, p);
-		BUG_ON(!rq->rt.rt_nr_migratory);
-		rq->rt.rt_nr_migratory--;
-	} else {
-		if (!task_current(rq, p))
-			enqueue_pushable_task(rq, p);
-		rq->rt.rt_nr_migratory++;
-	}
-
-	update_rt_migration(&rq->rt);
-}
-
 /* Assumes rq->lock is held */
 static void rq_online_rt(struct rq *rq)
 {
@@ -2324,7 +2284,7 @@ const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
-	.set_cpus_allowed       = set_cpus_allowed_rt,
+	.set_cpus_allowed       = set_cpus_allowed_common,
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
 	.task_woken		= task_woken_rt,
@@ -245,7 +245,6 @@ struct task_group {
 #ifdef CONFIG_SMP
 	atomic_long_t load_avg;
-	atomic_t runnable_avg;
 #endif
 #endif
@@ -366,27 +365,20 @@ struct cfs_rq {
 #ifdef CONFIG_SMP
 	/*
-	 * CFS Load tracking
-	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
-	 * This allows for the description of both thread and group usage (in
-	 * the FAIR_GROUP_SCHED case).
-	 * runnable_load_avg is the sum of the load_avg_contrib of the
-	 * sched_entities on the rq.
-	 * blocked_load_avg is similar to runnable_load_avg except that its
-	 * the blocked sched_entities on the rq.
-	 * utilization_load_avg is the sum of the average running time of the
-	 * sched_entities on the rq.
+	 * CFS load tracking
 	 */
-	unsigned long runnable_load_avg, blocked_load_avg, utilization_load_avg;
-	atomic64_t decay_counter;
-	u64 last_decay;
-	atomic_long_t removed_load;
-
+	struct sched_avg avg;
+	u64 runnable_load_sum;
+	unsigned long runnable_load_avg;
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	/* Required to track per-cpu representation of a task_group */
-	u32 tg_runnable_contrib;
-	unsigned long tg_load_contrib;
+	unsigned long tg_load_avg_contrib;
+#endif
+	atomic_long_t removed_load_avg, removed_util_avg;
+#ifndef CONFIG_64BIT
+	u64 load_last_update_time_copy;
+#endif

+#ifdef CONFIG_FAIR_GROUP_SCHED
 	/*
 	 * h_load = weight * f(tg)
 	 *
@@ -595,8 +587,6 @@ struct rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
 	struct list_head leaf_cfs_rq_list;
-
-	struct sched_avg avg;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -1065,9 +1055,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next)	do { } while (0)
 #endif
-#ifndef finish_arch_switch
-# define finish_arch_switch(prev)	do { } while (0)
-#endif
 #ifndef finish_arch_post_lock_switch
 # define finish_arch_post_lock_switch()	do { } while (0)
 #endif
@@ -1268,6 +1255,8 @@ extern void trigger_load_balance(struct rq *rq);
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);

+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+
 #else

 static inline void idle_enter_fair(struct rq *rq) { }
@@ -1319,7 +1308,7 @@ extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
 unsigned long to_ratio(u64 period, u64 runtime);

-extern void init_task_runnable_average(struct task_struct *p);
+extern void init_entity_runnable_average(struct sched_entity *se);

 static inline void add_nr_running(struct rq *rq, unsigned count)
 {
@@ -123,6 +123,7 @@ const struct sched_class stop_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_stop,
+	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif

 	.set_curr_task          = set_curr_task_stop,
@@ -35,13 +35,16 @@ struct cpu_stop_done {
 /* the actual stopper, one per every possible cpu, enabled on online cpus */
 struct cpu_stopper {
+	struct task_struct	*thread;
+
 	spinlock_t		lock;
 	bool			enabled;	/* is this stopper enabled? */
 	struct list_head	works;		/* list of pending works */
+
+	struct cpu_stop_work	stop_work;	/* for stop_cpus */
 };

 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
-static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
 static bool stop_machine_initialized = false;

 /*
@@ -74,7 +77,6 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
 static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
 	unsigned long flags;
@@ -82,7 +84,7 @@ static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 	if (stopper->enabled) {
 		list_add_tail(&work->list, &stopper->works);
-		wake_up_process(p);
+		wake_up_process(stopper->thread);
 	} else
 		cpu_stop_signal_done(work->done, false);
@@ -139,7 +141,7 @@ enum multi_stop_state {
 };

 struct multi_stop_data {
-	int			(*fn)(void *);
+	cpu_stop_fn_t		fn;
 	void			*data;
 	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
 	unsigned int		num_threads;
@@ -293,7 +295,6 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 /* static data for stop_cpus */
 static DEFINE_MUTEX(stop_cpus_mutex);
-static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

 static void queue_stop_cpus_work(const struct cpumask *cpumask,
 				 cpu_stop_fn_t fn, void *arg,
@@ -302,22 +303,19 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
 	struct cpu_stop_work *work;
 	unsigned int cpu;

-	/* initialize works and done */
-	for_each_cpu(cpu, cpumask) {
-		work = &per_cpu(stop_cpus_work, cpu);
-		work->fn = fn;
-		work->arg = arg;
-		work->done = done;
-	}
-
 	/*
 	 * Disable preemption while queueing to avoid getting
 	 * preempted by a stopper which might wait for other stoppers
 	 * to enter @fn which can lead to deadlock.
 	 */
 	lg_global_lock(&stop_cpus_lock);
-	for_each_cpu(cpu, cpumask)
-		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
+	for_each_cpu(cpu, cpumask) {
+		work = &per_cpu(cpu_stopper.stop_work, cpu);
+		work->fn = fn;
+		work->arg = arg;
+		work->done = done;
+		cpu_stop_queue_work(cpu, work);
+	}
 	lg_global_unlock(&stop_cpus_lock);
 }
@@ -458,19 +456,21 @@ extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 static void cpu_stop_create(unsigned int cpu)
 {
-	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
+	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
 }

 static void cpu_stop_park(unsigned int cpu)
 {
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct cpu_stop_work *work;
+	struct cpu_stop_work *work, *tmp;
 	unsigned long flags;

 	/* drain remaining works */
 	spin_lock_irqsave(&stopper->lock, flags);
-	list_for_each_entry(work, &stopper->works, list)
+	list_for_each_entry_safe(work, tmp, &stopper->works, list) {
+		list_del_init(&work->list);
 		cpu_stop_signal_done(work->done, false);
+	}
 	stopper->enabled = false;
 	spin_unlock_irqrestore(&stopper->lock, flags);
 }
@@ -485,7 +485,7 @@ static void cpu_stop_unpark(unsigned int cpu)
 static struct smp_hotplug_thread cpu_stop_threads = {
-	.store			= &cpu_stopper_task,
+	.store			= &cpu_stopper.thread,
 	.thread_should_run	= cpu_stop_should_run,
 	.thread_fn		= cpu_stopper_thread,
 	.thread_comm		= "migration/%u",
@@ -515,7 +515,7 @@ early_initcall(cpu_stop_init);

 #ifdef CONFIG_STOP_MACHINE

-int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
+static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
 {
 	struct multi_stop_data msdata = {
 		.fn = fn,
@@ -548,7 +548,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
 }

-int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
+int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
 {
 	int ret;
@@ -582,7 +582,7 @@ EXPORT_SYMBOL_GPL(stop_machine);
  * 0 if all executions of @fn returned 0, any non zero return value if any
  * returned non zero.
  */
-int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 				   const struct cpumask *cpus)
 {
 	struct multi_stop_data msdata = { .fn = fn, .data = data,
@@ -26,7 +26,7 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
 }

 static void
-probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
+probe_sched_wakeup(void *ignore, struct task_struct *wakee)
 {
 	if (unlikely(!sched_ref))
 		return;
@@ -514,7 +514,7 @@ static void wakeup_reset(struct trace_array *tr)
 }

 static void
-probe_wakeup(void *ignore, struct task_struct *p, int success)
+probe_wakeup(void *ignore, struct task_struct *p)
 {
 	struct trace_array_cpu *data;
 	int cpu = smp_processor_id();
@@ -1714,9 +1714,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 		goto fail;

 	set_user_nice(worker->task, pool->attrs->nice);
-
-	/* prevent userland from meddling with cpumask of workqueue workers */
-	worker->task->flags |= PF_NO_SETAFFINITY;
+	kthread_bind_mask(worker->task, pool->attrs->cpumask);

 	/* successful, attach the worker to the pool */
 	worker_attach_to_pool(worker, pool);
@@ -3856,7 +3854,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		}

 		wq->rescuer = rescuer;
-		rescuer->task->flags |= PF_NO_SETAFFINITY;
+		kthread_bind_mask(rescuer->task, cpu_possible_mask);
 		wake_up_process(rescuer->task);
 	}