commit fcc1d2a9ce
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Fixes and two late cleanups"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cleanups: Add load balance cpumask pointer to 'struct lb_env'
  sched: Fix comment about PREEMPT_ACTIVE bit location
  sched: Fix minor code style issues
  sched: Use task_rq_unlock() in __sched_setscheduler()
  sched/numa: Add SD_PERFER_SIBLING to CPU domain
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -22,7 +22,7 @@
  *
  * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
  * - bit 26 is the NMI_MASK
- * - bit 28 is the PREEMPT_ACTIVE flag
+ * - bit 27 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
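The corrected comment follows from the field widths listed above: preempt (8 bits), softirq (8 bits), hardirq (bits 16-25) and NMI (bit 26) leave bit 27 as the first free bit. A minimal standalone C sketch of that arithmetic, assuming the generic shift definitions of this era's hardirq.h (arch overrides of PREEMPT_ACTIVE may place it elsewhere):

#include <stdio.h>

/* Field widths as documented in the comment block above. */
#define PREEMPT_BITS	8	/* bits  0-7  */
#define SOFTIRQ_BITS	8	/* bits  8-15 */
#define HARDIRQ_BITS	10	/* bits 16-25 */
#define NMI_BITS	1	/* bit  26    */

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

/* PREEMPT_ACTIVE occupies the first bit past the NMI bit: 26 + 1 = 27. */
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)

int main(void)
{
	/* Prints: PREEMPT_ACTIVE bit = 27 (mask 0x08000000) */
	printf("PREEMPT_ACTIVE bit = %d (mask 0x%08x)\n",
	       PREEMPT_ACTIVE_SHIFT, 1u << PREEMPT_ACTIVE_SHIFT);
	return 0;
}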
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -164,6 +164,7 @@ int arch_update_cpu_topology(void);
 				| 0*SD_SHARE_CPUPOWER			\
 				| 0*SD_SHARE_PKG_RESOURCES		\
 				| 0*SD_SERIALIZE			\
+				| 1*SD_PREFER_SIBLING			\
 				,					\
 	.last_balance		= jiffies,				\
 	.balance_interval	= 1,					\
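SD_PREFER_SIBLING tells the load balancer to spread tasks toward sibling domains rather than stack them; with the flag now set in SD_CPU_INIT, the parent (NUMA-level) balancing pass sees it on the CPU domain. A self-contained sketch of how such a flag is consumed, loosely modeled on update_sg_lb_stats()/update_sd_lb_stats() in kernel/sched/fair.c (the _sketch names and the flag value are illustrative, not kernel code):

#define SD_PREFER_SIBLING	0x1000	/* illustrative flag value */

struct sched_domain_sketch {
	unsigned int flags;
	struct sched_domain_sketch *child;
};

struct sg_lb_stats_sketch {
	unsigned long group_capacity;
};

/* When the child domain sets SD_PREFER_SIBLING, a non-local group is
 * clamped to one task's worth of capacity (provided the local group
 * still has room), so surplus tasks spill over to sibling groups. */
static void clamp_capacity_for_sibling(struct sched_domain_sketch *sd,
				       struct sg_lb_stats_sketch *sgs,
				       int local_group, int this_has_capacity)
{
	int prefer_sibling = sd->child &&
			     (sd->child->flags & SD_PREFER_SIBLING);

	if (prefer_sibling && !local_group && this_has_capacity &&
	    sgs->group_capacity > 1)
		sgs->group_capacity = 1;
}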
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4340,9 +4340,7 @@ recheck:
 	 */
 	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
 			param->sched_priority == p->rt_priority))) {
-
-		__task_rq_unlock(rq);
-		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		task_rq_unlock(rq, p, &flags);
 		return 0;
 	}
 
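task_rq_unlock() is the existing helper pairing the two unlocks that were open-coded here, which keeps __sched_setscheduler() symmetric with the task_rq_lock() it took earlier. Roughly as the helper reads in this era's kernel/sched/core.c (paraphrased, so treat the exact sparse annotations as approximate):

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	/* Drops the runqueue lock, then p->pi_lock with IRQ restore --
	 * exactly the __task_rq_unlock() + raw_spin_unlock_irqrestore()
	 * sequence the old code spelled out by hand. */
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}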
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -65,8 +65,8 @@ static int convert_prio(int prio)
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		struct cpumask *lowest_mask)
 {
-	int                  idx      = 0;
-	int                  task_pri = convert_prio(p->prio);
+	int idx = 0;
+	int task_pri = convert_prio(p->prio);
 
 	if (task_pri >= MAX_RT_PRIO)
 		return 0;
@@ -137,9 +137,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
  */
 void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 {
-	int                 *currpri = &cp->cpu_to_pri[cpu];
-	int                  oldpri  = *currpri;
-	int                  do_mb = 0;
+	int *currpri = &cp->cpu_to_pri[cpu];
+	int oldpri = *currpri;
+	int do_mb = 0;
 
 	newpri = convert_prio(newpri);
 
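Both cpupri hunks are whitespace-only: the old lines used the file's column-aligned declaration style (reconstructed approximately above, since the rendered diff collapses spacing) and the fix flattens them to ordinary declarations. For context, cpupri keeps one CPU mask per priority level so cpupri_find() can locate a CPU running below a task's priority without scanning runqueues. A simplified, self-contained sketch of that idea (the _sketch names and sizes are illustrative, not the kernel structures):

#define NR_PRI_SKETCH	102	/* ~CPUPRI_NR_PRIORITIES: idle, normal, RT0-99 */
#define NR_CPUS_SKETCH	64

struct cpupri_sketch {
	unsigned long pri_to_cpu[NR_PRI_SKETCH];	/* one CPU mask per level */
	int cpu_to_pri[NR_CPUS_SKETCH];			/* current level per CPU */
};

/* Find any CPU running below the task's priority level; returns its
 * index, or -1 if every CPU is busy at an equal or higher level. */
static int cpupri_find_sketch(struct cpupri_sketch *cp, int task_pri)
{
	int idx, cpu;

	for (idx = 0; idx < task_pri; idx++) {
		unsigned long mask = cp->pri_to_cpu[idx];

		if (!mask)
			continue;
		for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
			if (mask & (1UL << cpu))
				return cpu;
	}
	return -1;
}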
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3069,6 +3069,9 @@ struct lb_env {
 	int			new_dst_cpu;
 	enum cpu_idle_type	idle;
 	long			imbalance;
+	/* The set of CPUs under consideration for load-balancing */
+	struct cpumask		*cpus;
+
 	unsigned int		flags;
 
 	unsigned int		loop;
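This is the cleanup named in the merge title: instead of threading the candidate cpumask through update_sd_lb_stats(), update_sg_lb_stats(), find_busiest_group() and find_busiest_queue() as an extra parameter, load_balance() stores it once in the lb_env it already passes down the whole path; the hunks below are the mechanical fallout. A minimal, self-contained sketch of the general pattern, with hypothetical names (balance_env_sketch and friends are illustrative, not kernel code):

/* Generic form of the refactor: parameters shared by a call chain are
 * bundled into one environment struct built once at the top. */
struct balance_env_sketch {
	int dst_cpu;
	long imbalance;
	unsigned long *cpus;	/* was an extra argument at every level */
};

static long stats_for(struct balance_env_sketch *env)
{
	/* ...every level now reads env->cpus instead of a parameter... */
	return env->imbalance;
}

static long balance_once(int dst_cpu, unsigned long *cpus)
{
	struct balance_env_sketch env = {
		.dst_cpu	= dst_cpu,
		.imbalance	= 0,
		.cpus		= cpus,	/* mirrors ".cpus = cpus" below */
	};

	return stats_for(&env);
}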
@@ -3653,8 +3656,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
-			int local_group, const struct cpumask *cpus,
-			int *balance, struct sg_lb_stats *sgs)
+			int local_group, int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long nr_running, max_nr_running, min_nr_running;
 	unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3673,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	max_nr_running = 0;
 	min_nr_running = ~0UL;
 
-	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
 		nr_running = rq->nr_running;
@@ -3800,8 +3802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
  * @sds: variable to hold the statistics for this sched_domain.
  */
 static inline void update_sd_lb_stats(struct lb_env *env,
-					const struct cpumask *cpus,
-					int *balance, struct sd_lb_stats *sds)
+					int *balance, struct sd_lb_stats *sds)
 {
 	struct sched_domain *child = env->sd->child;
 	struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3819,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 
 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(env, sg, load_idx, local_group,
-				   cpus, balance, &sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
 
 		if (local_group && !(*balance))
 			return;
@@ -4055,7 +4055,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * to restore balance.
  *
  * @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
  *
@@ -4065,7 +4064,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
 {
 	struct sd_lb_stats sds;
 
@@ -4075,7 +4074,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
 	 * Compute the various statistics relavent for load balancing at
 	 * this level.
 	 */
-	update_sd_lb_stats(env, cpus, balance, &sds);
+	update_sd_lb_stats(env, balance, &sds);
 
 	/*
 	 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4154,7 @@ ret:
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *find_busiest_queue(struct lb_env *env,
-				     struct sched_group *group,
-				     const struct cpumask *cpus)
+				     struct sched_group *group)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -4171,7 +4169,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		if (!capacity)
 			capacity = fix_small_capacity(env->sd, group);
 
-		if (!cpumask_test_cpu(i, cpus))
+		if (!cpumask_test_cpu(i, env->cpus))
 			continue;
 
 		rq = cpu_rq(i);
@@ -4252,6 +4250,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_grpmask	= sched_group_cpus(sd->groups),
 		.idle		= idle,
 		.loop_break	= sched_nr_migrate_break,
+		.cpus		= cpus,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4259,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	schedstat_inc(sd, lb_count[idle]);
 
 redo:
-	group = find_busiest_group(&env, cpus, balance);
+	group = find_busiest_group(&env, balance);
 
 	if (*balance == 0)
 		goto out_balanced;
@@ -4270,7 +4269,7 @@ redo:
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(&env, group, cpus);
+	busiest = find_busiest_queue(&env, group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
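After the refactor, every level of the load-balancing path carries one fewer parameter. A condensed view of the resulting declarations, taken directly from the hunks above:

/* Condensed from the hunks above: the cpumask now rides in lb_env. */
struct lb_env {
	/* ... */
	/* The set of CPUs under consideration for load-balancing */
	struct cpumask *cpus;
	/* ... */
};

static inline void update_sg_lb_stats(struct lb_env *env,
			struct sched_group *group, int load_idx,
			int local_group, int *balance, struct sg_lb_stats *sgs);
static inline void update_sd_lb_stats(struct lb_env *env,
			int *balance, struct sd_lb_stats *sds);
static struct sched_group *find_busiest_group(struct lb_env *env, int *balance);
static struct rq *find_busiest_queue(struct lb_env *env,
			struct sched_group *group);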