sched/deadline: Collect sched_dl_entity initialization

Create a single function that initializes a sched_dl_entity.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Daniel Bristot de Oliveira <bristot@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Phil Auld <pauld@redhat.com>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lkml.kernel.org/r/51acc695eecf0a1a2f78f9a044e11ffd9b316bcf.1699095159.git.bristot@kernel.org
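
For illustration only: a minimal, standalone C sketch of the consolidation this patch performs, using stand-in types and stubbed helpers in place of the kernel's struct sched_dl_entity, hrtimers and RB_CLEAR_NODE(); the authoritative code is in the hunks below.

/*
 * Standalone sketch (not kernel code). The names mirror the patch; the
 * types and bodies are stand-ins so the example compiles on its own.
 */
#include <stdio.h>
#include <string.h>

struct dl_entity_sketch {		/* stand-in for struct sched_dl_entity */
	int rb_node_linked;		/* stand-in for the rb_node linkage */
	long dl_runtime;
	long dl_deadline;
	long dl_period;
	int dl_timer_armed;		/* stand-in for the dl_timer hrtimer */
	int inactive_timer_armed;	/* stand-in for the inactive_timer hrtimer */
};

/* Stubs for the two timer initializers, made static in deadline.c by the patch. */
static void init_dl_task_timer(struct dl_entity_sketch *dl_se)
{
	dl_se->dl_timer_armed = 0;
}

static void init_dl_inactive_task_timer(struct dl_entity_sketch *dl_se)
{
	dl_se->inactive_timer_armed = 0;
}

/* Takes the entity directly, as in the patch, rather than a task_struct. */
static void __dl_clear_params(struct dl_entity_sketch *dl_se)
{
	dl_se->dl_runtime = 0;
	dl_se->dl_deadline = 0;
	dl_se->dl_period = 0;
}

/* The collected initializer, shaped like the new init_dl_entity(). */
static void init_dl_entity(struct dl_entity_sketch *dl_se)
{
	dl_se->rb_node_linked = 0;	/* stands in for RB_CLEAR_NODE() */
	init_dl_task_timer(dl_se);
	init_dl_inactive_task_timer(dl_se);
	__dl_clear_params(dl_se);
}

int main(void)
{
	struct dl_entity_sketch dl;

	memset(&dl, 0x5a, sizeof(dl));	/* arbitrary garbage before init */
	init_dl_entity(&dl);		/* one call replaces the four in __sched_fork() */
	printf("runtime=%ld deadline=%ld period=%ld\n",
	       dl.dl_runtime, dl.dl_deadline, dl.dl_period);
	return 0;
}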
Peter Zijlstra 2023-11-04 11:59:19 +01:00
parent c708a4dc5a
commit 9e07d45c52
3 changed files with 17 additions and 15 deletions

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c

@@ -4511,10 +4511,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	memset(&p->stats, 0, sizeof(p->stats));
 #endif
-	RB_CLEAR_NODE(&p->dl.rb_node);
-	init_dl_task_timer(&p->dl);
-	init_dl_inactive_task_timer(&p->dl);
-	__dl_clear_params(p);
+	init_dl_entity(&p->dl);
 	INIT_LIST_HEAD(&p->rt.run_list);
 	p->rt.timeout = 0;

--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c

@@ -335,6 +335,8 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw)
 	__add_rq_bw(new_bw, &rq->dl);
 }
+static void __dl_clear_params(struct sched_dl_entity *dl_se);
 /*
  * The utilization of a task cannot be immediately removed from
  * the rq active utilization (running_bw) when the task blocks.
@@ -434,7 +436,7 @@ static void task_non_contending(struct task_struct *p)
 			raw_spin_lock(&dl_b->lock);
 			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 			raw_spin_unlock(&dl_b->lock);
-			__dl_clear_params(p);
+			__dl_clear_params(dl_se);
 		}
 		return;
@@ -1183,7 +1185,7 @@ unlock:
 	return HRTIMER_NORESTART;
 }
-void init_dl_task_timer(struct sched_dl_entity *dl_se)
+static void init_dl_task_timer(struct sched_dl_entity *dl_se)
 {
 	struct hrtimer *timer = &dl_se->dl_timer;
@@ -1389,7 +1391,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 		raw_spin_lock(&dl_b->lock);
 		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 		raw_spin_unlock(&dl_b->lock);
-		__dl_clear_params(p);
+		__dl_clear_params(dl_se);
 		goto unlock;
 	}
@@ -1405,7 +1407,7 @@ unlock:
 	return HRTIMER_NORESTART;
 }
-void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
+static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
 {
 	struct hrtimer *timer = &dl_se->inactive_timer;
@@ -2957,10 +2959,8 @@ bool __checkparam_dl(const struct sched_attr *attr)
 /*
  * This function clears the sched_dl_entity static params.
  */
-void __dl_clear_params(struct task_struct *p)
+static void __dl_clear_params(struct sched_dl_entity *dl_se)
 {
-	struct sched_dl_entity *dl_se = &p->dl;
 	dl_se->dl_runtime = 0;
 	dl_se->dl_deadline = 0;
 	dl_se->dl_period = 0;
@@ -2978,6 +2978,14 @@ void __dl_clear_params(struct task_struct *p)
 #endif
 }
+void init_dl_entity(struct sched_dl_entity *dl_se)
+{
+	RB_CLEAR_NODE(&dl_se->rb_node);
+	init_dl_task_timer(dl_se);
+	init_dl_inactive_task_timer(dl_se);
+	__dl_clear_params(dl_se);
+}
 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
 {
 	struct sched_dl_entity *dl_se = &p->dl;

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h

@@ -273,8 +273,6 @@ struct rt_bandwidth {
 	unsigned int rt_period_active;
 };
-void __dl_clear_params(struct task_struct *p);
 static inline int dl_bandwidth_enabled(void)
 {
 	return sysctl_sched_rt_runtime >= 0;
@@ -2427,8 +2425,7 @@ extern struct rt_bandwidth def_rt_bandwidth;
 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
-extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
-extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
+extern void init_dl_entity(struct sched_dl_entity *dl_se);
 #define BW_SHIFT 20
 #define BW_UNIT (1 << BW_SHIFT)