sched: Make const-safe

With a modified container_of() that preserves constness, the compiler
finds some pointers which should have been marked as const.  task_of()
also needs to become const-preserving for the !FAIR_GROUP_SCHED case so
that cfs_rq_of() can take a const argument.  No change to generated code.
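
The mechanism can be sketched roughly as follows (illustrative only; it
assumes a _Generic-based helper, the actual container_of() change this
builds on may differ, and the my_* names are not the kernel's): a wrapper
that returns a const container pointer when handed a const member pointer
lets a macro such as task_of() preserve constness, where a static inline
function would have to commit to a single return type.

	/* GNU C sketch with illustrative names. */
	#include <stddef.h>

	#define my_container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Pick a const or non-const result to match the argument. */
	#define my_container_of_const(ptr, type, member)		\
		_Generic(ptr,						\
			const typeof(*(ptr)) *:				\
				((const type *)my_container_of(ptr, type, member)), \
			default: my_container_of(ptr, type, member))

	/* my_task_of(&p->se) is const exactly when p is const. */
	#define my_task_of(_se) \
		my_container_of_const(_se, struct task_struct, se)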

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20221212144946.2657785-1-willy@infradead.org
---
 3 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -152,7 +152,7 @@ __read_mostly int scheduler_running;
 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
 
 /* kernel prio, less is more */
-static inline int __task_prio(struct task_struct *p)
+static inline int __task_prio(const struct task_struct *p)
 {
 	if (p->sched_class == &stop_sched_class) /* trumps deadline */
 		return -2;
@@ -174,7 +174,8 @@ static inline int __task_prio(struct task_struct *p)
  */
 
 /* real prio, less is less */
-static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
+static inline bool prio_less(const struct task_struct *a,
+			     const struct task_struct *b, bool in_fi)
 {
 
 	int pa = __task_prio(a), pb = __task_prio(b);
@@ -194,7 +195,8 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool
 	return false;
 }
 
-static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
+static inline bool __sched_core_less(const struct task_struct *a,
+				     const struct task_struct *b)
 {
 	if (a->core_cookie < b->core_cookie)
 		return true;

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -468,7 +468,7 @@ is_same_group(struct sched_entity *se, struct sched_entity *pse)
 	return NULL;
 }
 
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
+static inline struct sched_entity *parent_entity(const struct sched_entity *se)
 {
 	return se->parent;
 }
@@ -595,8 +595,8 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 	return min_vruntime;
 }
 
-static inline bool entity_before(struct sched_entity *a,
-				 struct sched_entity *b)
+static inline bool entity_before(const struct sched_entity *a,
+				 const struct sched_entity *b)
 {
 	return (s64)(a->vruntime - b->vruntime) < 0;
 }
@@ -11852,7 +11852,8 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
 /*
  * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
  */
-static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
+static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
+			 bool forceidle)
 {
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -11877,11 +11878,12 @@ void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
 	se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
 }
 
-bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
+bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
+		   bool in_fi)
 {
 	struct rq *rq = task_rq(a);
-	struct sched_entity *sea = &a->se;
-	struct sched_entity *seb = &b->se;
+	const struct sched_entity *sea = &a->se;
+	const struct sched_entity *seb = &b->se;
 	struct cfs_rq *cfs_rqa;
 	struct cfs_rq *cfs_rqb;
 	s64 delta;

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -248,7 +248,7 @@ static inline void update_avg(u64 *avg, u64 sample)
 
 #define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
 
-static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
+static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
 {
 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
 	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
@@ -260,8 +260,8 @@ static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
 /*
  * Tells if entity @a should preempt entity @b.
  */
-static inline bool
-dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
+static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
+				     const struct sched_dl_entity *b)
 {
 	return dl_entity_is_special(a) ||
 	       dl_time_before(a->deadline, b->deadline);
@@ -1244,7 +1244,8 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
 	return &rq->__lock;
 }
 
-bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);
+bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
+		   bool fi);
 
 /*
  * Helpers to check if the CPU's core cookie matches with the task's cookie
@@ -1423,7 +1424,7 @@ static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 }
 
 /* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
 {
 	return se->cfs_rq;
 }
@@ -1436,19 +1437,16 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 
 #else
 
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
-	return container_of(se, struct task_struct, se);
-}
+#define task_of(_se)	container_of(_se, struct task_struct, se)
 
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
 {
 	return &task_rq(p)->cfs;
 }
 
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
 {
-	struct task_struct *p = task_of(se);
+	const struct task_struct *p = task_of(se);
 	struct rq *rq = task_rq(p);
 
 	return &rq->cfs;