rcu: Add force_quiescent_state() testing to rcutorture
Add force_quiescent_state() testing to rcutorture, with a separate thread
that repeatedly invokes force_quiescent_state() in bursts. This can greatly
increase the probability of encountering certain types of race conditions.

Suggested-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1262646551116-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
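The pacing of the bursts is controlled by the three new module parameters added in the rcutorture.c hunks below: a burst lasts roughly fqs_duration microseconds, successive force_quiescent_state() calls within a burst are spaced fqs_holdoff microseconds apart, and bursts are separated by fqs_stutter seconds. The following is a minimal user-space C sketch of that pacing only, not the kernel code; the fake_force_quiescent_state() stub and the non-zero example values are illustrative assumptions (the module defaults leave fqs_duration and fqs_holdoff at 0, i.e. disabled). Since the parameters are declared with module_param(..., 0444), they would typically be supplied at load time, e.g. "modprobe rcutorture fqs_duration=100 fqs_holdoff=10 fqs_stutter=3".

    /* Illustrative user-space sketch of the fqs burst pacing -- not kernel code. */
    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <unistd.h>

    static int fqs_duration = 100;  /* burst length (us); module default is 0 (disabled) */
    static int fqs_holdoff = 10;    /* hold time between calls within a burst (us) */
    static int fqs_stutter = 3;     /* wait time between bursts (s) */

    /* Stand-in for cur_ops->fqs(), i.e. force_quiescent_state() in the kernel. */
    static void fake_force_quiescent_state(void)
    {
            puts("force_quiescent_state()");
    }

    int main(void)
    {
            for (int burst = 0; burst < 2; burst++) {   /* the kernel thread loops until stopped */
                    sleep(fqs_stutter);                 /* pause fqs_stutter seconds between bursts */
                    int remaining = fqs_duration;
                    while (remaining > 0) {             /* ~fqs_duration/fqs_holdoff calls per burst */
                            fake_force_quiescent_state();
                            usleep(fqs_holdoff);
                            remaining -= fqs_holdoff;
                    }
            }
            return 0;
    }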
This commit is contained in:
parent 46a1e34eda
commit bf66f18e79
@@ -62,6 +62,18 @@ static inline long rcu_batches_completed_bh(void)
 extern int rcu_expedited_torture_stats(char *page);
 
+static inline void rcu_force_quiescent_state(void)
+{
+}
+
+static inline void rcu_bh_force_quiescent_state(void)
+{
+}
+
+static inline void rcu_sched_force_quiescent_state(void)
+{
+}
+
 #define synchronize_rcu synchronize_sched
 
 static inline void synchronize_rcu_expedited(void)
@@ -99,6 +99,9 @@ extern void rcu_check_callbacks(int cpu, int user);
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
 extern long rcu_batches_completed_sched(void);
+extern void rcu_force_quiescent_state(void);
+extern void rcu_bh_force_quiescent_state(void);
+extern void rcu_sched_force_quiescent_state(void);
 
 #ifdef CONFIG_NO_HZ
 void rcu_enter_nohz(void);
@@ -61,6 +61,9 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
 static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
 static int stutter = 5;          /* Start/stop testing interval (in sec) */
 static int irqreader = 1;        /* RCU readers from irq (timers). */
+static int fqs_duration = 0;     /* Duration of bursts (us), 0 to disable. */
+static int fqs_holdoff = 0;      /* Hold time within burst (us). */
+static int fqs_stutter = 3;      /* Wait time between bursts (s). */
 static char *torture_type = "rcu"; /* What RCU implementation to torture. */
 
 module_param(nreaders, int, 0444);
@@ -79,6 +82,12 @@ module_param(stutter, int, 0444);
 MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
 module_param(irqreader, int, 0444);
 MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
+module_param(fqs_duration, int, 0444);
+MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
+module_param(fqs_holdoff, int, 0444);
+MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
+module_param(fqs_stutter, int, 0444);
+MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
 module_param(torture_type, charp, 0444);
 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
 
@@ -99,6 +108,7 @@ static struct task_struct **reader_tasks;
 static struct task_struct *stats_task;
 static struct task_struct *shuffler_task;
 static struct task_struct *stutter_task;
+static struct task_struct *fqs_task;
 
 #define RCU_TORTURE_PIPE_LEN 10
 
@@ -263,6 +273,7 @@ struct rcu_torture_ops {
        void (*deferred_free)(struct rcu_torture *p);
        void (*sync)(void);
        void (*cb_barrier)(void);
+       void (*fqs)(void);
        int (*stats)(char *page);
        int irq_capable;
        char *name;
@@ -347,6 +358,7 @@ static struct rcu_torture_ops rcu_ops = {
        .deferred_free = rcu_torture_deferred_free,
        .sync = synchronize_rcu,
        .cb_barrier = rcu_barrier,
+       .fqs = rcu_force_quiescent_state,
        .stats = NULL,
        .irq_capable = 1,
        .name = "rcu"
@@ -388,6 +400,7 @@ static struct rcu_torture_ops rcu_sync_ops = {
        .deferred_free = rcu_sync_torture_deferred_free,
        .sync = synchronize_rcu,
        .cb_barrier = NULL,
+       .fqs = rcu_force_quiescent_state,
        .stats = NULL,
        .irq_capable = 1,
        .name = "rcu_sync"
@@ -403,6 +416,7 @@ static struct rcu_torture_ops rcu_expedited_ops = {
        .deferred_free = rcu_sync_torture_deferred_free,
        .sync = synchronize_rcu_expedited,
        .cb_barrier = NULL,
+       .fqs = rcu_force_quiescent_state,
        .stats = NULL,
        .irq_capable = 1,
        .name = "rcu_expedited"
@@ -465,6 +479,7 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .deferred_free = rcu_bh_torture_deferred_free,
        .sync = rcu_bh_torture_synchronize,
        .cb_barrier = rcu_barrier_bh,
+       .fqs = rcu_bh_force_quiescent_state,
        .stats = NULL,
        .irq_capable = 1,
        .name = "rcu_bh"
@@ -480,6 +495,7 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
        .deferred_free = rcu_sync_torture_deferred_free,
        .sync = rcu_bh_torture_synchronize,
        .cb_barrier = NULL,
+       .fqs = rcu_bh_force_quiescent_state,
        .stats = NULL,
        .irq_capable = 1,
        .name = "rcu_bh_sync"
@@ -621,6 +637,7 @@ static struct rcu_torture_ops sched_ops = {
        .deferred_free = rcu_sched_torture_deferred_free,
        .sync = sched_torture_synchronize,
        .cb_barrier = rcu_barrier_sched,
+       .fqs = rcu_sched_force_quiescent_state,
        .stats = NULL,
        .irq_capable = 1,
        .name = "sched"
@@ -636,6 +653,7 @@ static struct rcu_torture_ops sched_sync_ops = {
        .deferred_free = rcu_sync_torture_deferred_free,
        .sync = sched_torture_synchronize,
        .cb_barrier = NULL,
+       .fqs = rcu_sched_force_quiescent_state,
        .stats = NULL,
        .name = "sched_sync"
 };
@@ -650,11 +668,44 @@ static struct rcu_torture_ops sched_expedited_ops = {
        .deferred_free = rcu_sync_torture_deferred_free,
        .sync = synchronize_sched_expedited,
        .cb_barrier = NULL,
+       .fqs = rcu_sched_force_quiescent_state,
        .stats = rcu_expedited_torture_stats,
        .irq_capable = 1,
        .name = "sched_expedited"
 };
 
+/*
+ * RCU torture force-quiescent-state kthread.  Repeatedly induces
+ * bursts of calls to force_quiescent_state(), increasing the probability
+ * of occurrence of some important types of race conditions.
+ */
+static int
+rcu_torture_fqs(void *arg)
+{
+       unsigned long fqs_resume_time;
+       int fqs_burst_remaining;
+
+       VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
+       do {
+               fqs_resume_time = jiffies + fqs_stutter * HZ;
+               while (jiffies - fqs_resume_time > LONG_MAX) {
+                       schedule_timeout_interruptible(1);
+               }
+               fqs_burst_remaining = fqs_duration;
+               while (fqs_burst_remaining > 0) {
+                       cur_ops->fqs();
+                       udelay(fqs_holdoff);
+                       fqs_burst_remaining -= fqs_holdoff;
+               }
+               rcu_stutter_wait("rcu_torture_fqs");
+       } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
+       VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
+       rcutorture_shutdown_absorb("rcu_torture_fqs");
+       while (!kthread_should_stop())
+               schedule_timeout_uninterruptible(1);
+       return 0;
+}
+
 /*
  * RCU torture writer kthread.  Repeatedly substitutes a new structure
  * for that pointed to by rcu_torture_current, freeing the old structure
@@ -1030,10 +1081,11 @@ rcu_torture_print_module_parms(char *tag)
        printk(KERN_ALERT "%s" TORTURE_FLAG
                "--- %s: nreaders=%d nfakewriters=%d "
                "stat_interval=%d verbose=%d test_no_idle_hz=%d "
-               "shuffle_interval=%d stutter=%d irqreader=%d\n",
+               "shuffle_interval=%d stutter=%d irqreader=%d "
+               "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d\n",
                torture_type, tag, nrealreaders, nfakewriters,
                stat_interval, verbose, test_no_idle_hz, shuffle_interval,
-               stutter, irqreader);
+               stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter);
 }
 
 static struct notifier_block rcutorture_nb = {
@@ -1109,6 +1161,12 @@ rcu_torture_cleanup(void)
        }
        stats_task = NULL;
 
+       if (fqs_task) {
+               VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
+               kthread_stop(fqs_task);
+       }
+       fqs_task = NULL;
+
        /* Wait for all RCU callbacks to fire. */
 
        if (cur_ops->cb_barrier != NULL)
@@ -1154,6 +1212,11 @@ rcu_torture_init(void)
                mutex_unlock(&fullstop_mutex);
                return -EINVAL;
        }
+       if (cur_ops->fqs == NULL && fqs_duration != 0) {
+               printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
+                      "fqs_duration, fqs disabled.\n");
+               fqs_duration = 0;
+       }
        if (cur_ops->init)
                cur_ops->init(); /* no "goto unwind" prior to this point!!! */
 
@@ -1282,6 +1345,19 @@
                        goto unwind;
                }
        }
+       if (fqs_duration < 0)
+               fqs_duration = 0;
+       if (fqs_duration) {
+               /* Create the stutter thread */
+               fqs_task = kthread_run(rcu_torture_fqs, NULL,
+                                      "rcu_torture_fqs");
+               if (IS_ERR(fqs_task)) {
+                       firsterr = PTR_ERR(fqs_task);
+                       VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
+                       fqs_task = NULL;
+                       goto unwind;
+               }
+       }
        register_reboot_notifier(&rcutorture_nb);
        mutex_unlock(&fullstop_mutex);
        return 0;
@@ -156,6 +156,24 @@ long rcu_batches_completed_bh(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 
+/*
+ * Force a quiescent state for RCU BH.
+ */
+void rcu_bh_force_quiescent_state(void)
+{
+       force_quiescent_state(&rcu_bh_state, 0);
+}
+EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
+
+/*
+ * Force a quiescent state for RCU-sched.
+ */
+void rcu_sched_force_quiescent_state(void)
+{
+       force_quiescent_state(&rcu_sched_state, 0);
+}
+EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
+
 /*
  * Does the CPU have callbacks ready to be invoked?
  */
@@ -61,6 +61,15 @@ long rcu_batches_completed(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
+/*
+ * Force a quiescent state for preemptible RCU.
+ */
+void rcu_force_quiescent_state(void)
+{
+       force_quiescent_state(&rcu_preempt_state, 0);
+}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+
 /*
  * Record a preemptable-RCU quiescent state for the specified CPU.  Note
  * that this just means that the task currently running on the CPU is
@@ -712,6 +721,16 @@ long rcu_batches_completed(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
+/*
+ * Force a quiescent state for RCU, which, because there is no preemptible
+ * RCU, becomes the same as rcu-sched.
+ */
+void rcu_force_quiescent_state(void)
+{
+       rcu_sched_force_quiescent_state();
+}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+
 /*
  * Because preemptable RCU does not exist, we never have to check for
  * CPUs being in quiescent states.