nohz: Add TICK_DEP_BIT_RCU
If a nohz_full CPU is looping in the kernel, the scheduling-clock tick might nevertheless remain disabled. In !PREEMPT kernels, this can prevent RCU's attempts to enlist the aid of that CPU's executions of cond_resched(), which can in turn result in an arbitrarily delayed grace period and thus an OOM.

RCU therefore needs a way to enable a holdout nohz_full CPU's scheduler-clock interrupt. This commit provides a new TICK_DEP_BIT_RCU value which RCU can pass to tick_dep_set_cpu() and friends to force on the scheduler-clock interrupt for a specified CPU or task. Because rcutorture sometimes needs to turn on the scheduler-clock tick as well, this commit also exports the relevant symbols to GPL-licensed modules.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
commit 01b4c39901
parent 54ecb8f702
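As a rough usage sketch (not part of this commit's diff; the helper names force_tick_on()/allow_tick_off() and the cpu argument are purely illustrative), a caller such as RCU or a GPL test module could pin the tick on a holdout CPU through the new bit and release it again later:

    #include <linux/tick.h>

    /* Illustrative helpers only; not part of this commit. */
    static void force_tick_on(int cpu)
    {
            /* Keep the scheduler-clock tick running on @cpu for RCU's benefit. */
            tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU);
    }

    static void allow_tick_off(int cpu)
    {
            /* Drop the RCU dependency; the tick may stop again if nothing else needs it. */
            tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU);
    }

Underneath these wrappers, it is the newly exported tick_nohz_dep_set_cpu() and tick_nohz_dep_clear_cpu() that make this usable from GPL-licensed modules such as rcutorture.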
include/linux/tick.h
@@ -108,7 +108,8 @@ enum tick_dep_bits {
 	TICK_DEP_BIT_POSIX_TIMER	= 0,
 	TICK_DEP_BIT_PERF_EVENTS	= 1,
 	TICK_DEP_BIT_SCHED		= 2,
-	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3
+	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3,
+	TICK_DEP_BIT_RCU		= 4
 };
 
 #define TICK_DEP_MASK_NONE		0
@@ -116,6 +117,7 @@ enum tick_dep_bits {
 #define TICK_DEP_MASK_PERF_EVENTS	(1 << TICK_DEP_BIT_PERF_EVENTS)
 #define TICK_DEP_MASK_SCHED		(1 << TICK_DEP_BIT_SCHED)
 #define TICK_DEP_MASK_CLOCK_UNSTABLE	(1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
+#define TICK_DEP_MASK_RCU		(1 << TICK_DEP_BIT_RCU)
 
 #ifdef CONFIG_NO_HZ_COMMON
 extern bool tick_nohz_enabled;
@@ -268,6 +270,9 @@ static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
 
+static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+
 static inline void tick_dep_set(enum tick_dep_bits bit) { }
 static inline void tick_dep_clear(enum tick_dep_bits bit) { }
 static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }

include/trace/events/timer.h
@@ -367,7 +367,8 @@ TRACE_EVENT(itimer_expire,
 		tick_dep_name(POSIX_TIMER)		\
 		tick_dep_name(PERF_EVENTS)		\
 		tick_dep_name(SCHED)			\
-		tick_dep_name_end(CLOCK_UNSTABLE)
+		tick_dep_name(CLOCK_UNSTABLE)		\
+		tick_dep_name_end(RCU)
 
 #undef tick_dep_name
 #undef tick_dep_mask_name

kernel/time/tick-sched.c
@@ -198,6 +198,11 @@ static bool check_tick_dependency(atomic_t *dep)
 		return true;
 	}
 
+	if (val & TICK_DEP_MASK_RCU) {
+		trace_tick_stop(0, TICK_DEP_MASK_RCU);
+		return true;
+	}
+
 	return false;
 }
 
@@ -324,6 +329,7 @@ void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);
 
 void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 {
@@ -331,6 +337,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 
 	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 }
+EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
 
 /*
  * Set a per-task tick dependency. Posix CPU timers need this in order to elapse