#include "sched.h"

/*
 * stop-task scheduling class.
 *
 * The stop task is the highest-priority task in the system; it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */

#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}
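
/*
 * The stop class is the highest-priority class: hand the CPU to the
 * per-CPU stop task whenever it is queued. Returning NULL lets the
 * core scheduler fall through to the next class.
 */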
static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct task_struct *stop = rq->stop;

	if (!stop || !task_on_rq_queued(stop))
		return NULL;

	put_prev_task(rq, prev);

	stop->se.exec_start = rq_clock_task(rq);

	return stop;
}
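
/*
 * The stop task is never kept on a per-class runqueue; enqueue and
 * dequeue only maintain the rq's nr_running bookkeeping.
 */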
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield; it's pointless. */
}
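
/*
 * Account the CPU time the stop task consumed: fold the elapsed rq
 * clock into sum_exec_runtime, the schedstats exec_max and the group
 * cputimer/cpuacct statistics, then restart exec_start.
 */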
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);
}
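
/* The per-CPU stop task needs no periodic tick processing. */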
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}
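
/* (Re)start the exec clock when the stop task is set as the current task. */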
static void set_curr_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	stop->se.exec_start = rq_clock_task(rq);
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!? what priority? */
}
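
/* Stop tasks have no timeslice; report a zero round-robin interval. */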
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}
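
/* Runtime accounting for the stop task is done in put_prev_task_stop(). */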
static void update_curr_stop(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
const struct sched_class stop_sched_class = {
	.next = &dl_sched_class,

	.enqueue_task = enqueue_task_stop,
	.dequeue_task = dequeue_task_stop,
	.yield_task = yield_task_stop,

	.check_preempt_curr = check_preempt_curr_stop,

	.pick_next_task = pick_next_task_stop,
	.put_prev_task = put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq = select_task_rq_stop,
	.set_cpus_allowed = set_cpus_allowed_common,
#endif

	.set_curr_task = set_curr_task_stop,
	.task_tick = task_tick_stop,

	.get_rr_interval = get_rr_interval_stop,

	.prio_changed = prio_changed_stop,
	.switched_to = switched_to_stop,
	.update_curr = update_curr_stop,
};