Merge branches 'docs.2022.04.20a', 'fixes.2022.04.20a', 'nocb.2022.04.11b', 'rcu-tasks.2022.04.11b', 'srcu.2022.05.03a', 'torture.2022.04.11b', 'torture-tasks.2022.04.20a' and 'torturescript.2022.04.20a' into HEAD
docs.2022.04.20a: Documentation updates.
fixes.2022.04.20a: Miscellaneous fixes.
nocb.2022.04.11b: Callback-offloading updates.
rcu-tasks.2022.04.11b: RCU-tasks updates.
srcu.2022.05.03a: Put SRCU on a memory diet.
torture.2022.04.11b: Torture-test updates.
torture-tasks.2022.04.20a: Avoid torture testing changing RCU configuration.
torturescript.2022.04.20a: Torture-test scripting updates.
commit be05ee5437
@@ -4955,10 +4955,34 @@
        number avoids disturbing real-time workloads,
        but lengthens grace periods.

rcupdate.rcu_task_stall_info= [KNL]
        Set initial timeout in jiffies for RCU task stall
        informational messages, which give some indication
        of the problem for those not patient enough to
        wait for ten minutes. Informational messages are
        only printed prior to the stall-warning message
        for a given grace period. Disable with a value
        less than or equal to zero. Defaults to ten
        seconds. A change in value does not take effect
        until the beginning of the next grace period.

rcupdate.rcu_task_stall_info_mult= [KNL]
        Multiplier for time interval between successive
        RCU task stall informational messages for a given
        RCU tasks grace period. This value is clamped
        to one through ten, inclusive. It defaults to
        the value three, so that the first informational
        message is printed 10 seconds into the grace
        period, the second at 40 seconds, the third at
        160 seconds, and then the stall warning at 600
        seconds would prevent a fourth at 640 seconds.
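As a worked illustration of the two parameters above together with rcupdate.rcu_task_stall_timeout (a user-space sketch, not part of the patch): with the default ten-second initial timeout, multiplier of three, and ten-minute warning timeout, each informational message lands a factor of four (multiplier plus one) later than the previous one, so only three are printed before the stall warning takes over.

        #include <stdio.h>

        int main(void)
        {
                unsigned long info = 10;        /* rcupdate.rcu_task_stall_info, in seconds */
                unsigned long warn = 600;       /* rcupdate.rcu_task_stall_timeout, in seconds */
                unsigned long mult = 3;         /* rcupdate.rcu_task_stall_info_mult */
                unsigned long t;

                for (t = info; t < warn; t *= mult + 1) /* prints 10, 40, 160; 640 is suppressed */
                        printf("informational message at %lu seconds\n", t);
                printf("stall warning at %lu seconds\n", warn);
                return 0;
        }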
rcupdate.rcu_task_stall_timeout= [KNL]
        Set timeout in jiffies for RCU task stall warning
        messages. Disable with a value less than or equal
        to zero.
        Set timeout in jiffies for RCU task stall
        warning messages. Disable with a value less
        than or equal to zero. Defaults to ten minutes.
        A change in value does not take effect until
        the beginning of the next grace period.

rcupdate.rcu_self_test= [KNL]
        Run the RCU early boot self tests
@@ -5377,6 +5401,17 @@
smart2= [HW]
        Format: <io1>[,<io2>[,...,<io8>]]

smp.csd_lock_timeout= [KNL]
        Specify the period of time in milliseconds
        that smp_call_function() and friends will wait
        for a CPU to release the CSD lock. This is
        useful when diagnosing bugs involving CPUs
        disabling interrupts for extended periods
        of time. Defaults to 5,000 milliseconds, and
        setting a value of zero disables this feature.
        This feature may be more efficiently disabled
        using the csdlock_debug- kernel parameter.

smsc-ircc2.nopnp [HW] Don't use PNP to discover SMC devices
smsc-ircc2.ircc_cfg= [HW] Device configuration I/O port
smsc-ircc2.ircc_sir= [HW] SIR base I/O port
@@ -5608,6 +5643,30 @@
        off: Disable mitigation and remove
        performance impact to RDRAND and RDSEED

srcutree.big_cpu_lim [KNL]
        Specifies the number of CPUs constituting a
        large system, such that srcu_struct structures
        should immediately allocate an srcu_node array.
        This kernel-boot parameter defaults to 128,
        but takes effect only when the low-order four
        bits of srcutree.convert_to_big are equal to 3
        (decide at boot).

srcutree.convert_to_big [KNL]
        Specifies under what conditions an SRCU tree
        srcu_struct structure will be converted to big
        form, that is, with an rcu_node tree:

                0: Never.
                1: At init_srcu_struct() time.
                2: When rcutorture decides to.
                3: Decide at boot time (default).
                0x1X: Above plus if high contention.

        Either way, the srcu_node tree will be sized based
        on the actual runtime number of CPUs (nr_cpu_ids)
        instead of the compile-time CONFIG_NR_CPUS.

srcutree.counter_wrap_check [KNL]
        Specifies how frequently to check for
        grace-period sequence counter wrap for the

@@ -5625,6 +5684,14 @@
        expediting. Set to zero to disable automatic
        expediting.

srcutree.small_contention_lim [KNL]
        Specifies the number of update-side contention
        events per jiffy that will be tolerated before
        initiating a conversion of an srcu_struct
        structure to big form. Note that the value of
        srcutree.convert_to_big must have the 0x10 bit
        set for contention-based conversions to occur.
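To make the encoding of srcutree.convert_to_big concrete, here is a small user-space sketch (not part of the patch) that mirrors the SRCU_SIZING_* macros appearing later in this diff: the low-order bits select the base policy, and the 0x10 bit independently enables contention-driven conversion.

        #include <stdio.h>

        #define SRCU_SIZING_CONTEND 0x10        /* mirrors the macro added later in this diff */

        int main(void)
        {
                int convert_to_big = 0x13;      /* example: decide at boot, plus convert on contention */

                printf("base policy: %d\n", convert_to_big & ~SRCU_SIZING_CONTEND);    /* prints 3 */
                printf("contention conversion: %s\n",
                       convert_to_big & SRCU_SIZING_CONTEND ? "enabled" : "disabled"); /* enabled */
                return 0;
        }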
ssbd= [ARM64,HW]
        Speculative Store Bypass Disable control
@@ -35,6 +35,7 @@ config KPROBES
        depends on MODULES
        depends on HAVE_KPROBES
        select KALLSYMS
        select TASKS_RCU if PREEMPTION
        help
          Kprobes allows you to trap at almost any kernel address and
          execute a callback function. register_kprobe() establishes
@@ -196,6 +196,7 @@ void synchronize_rcu_tasks_rude(void);
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
#define rcu_tasks_qs(t, preempt) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
@@ -2117,6 +2117,47 @@ static inline void cond_resched_rcu(void)
#endif
}

#ifdef CONFIG_PREEMPT_DYNAMIC

extern bool preempt_model_none(void);
extern bool preempt_model_voluntary(void);
extern bool preempt_model_full(void);

#else

static inline bool preempt_model_none(void)
{
        return IS_ENABLED(CONFIG_PREEMPT_NONE);
}
static inline bool preempt_model_voluntary(void)
{
        return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
}
static inline bool preempt_model_full(void)
{
        return IS_ENABLED(CONFIG_PREEMPT);
}

#endif

static inline bool preempt_model_rt(void)
{
        return IS_ENABLED(CONFIG_PREEMPT_RT);
}

/*
 * Does the preemption model allow non-cooperative preemption?
 *
 * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
 * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
 * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
 * PREEMPT_NONE model.
 */
static inline bool preempt_model_preemptible(void)
{
        return preempt_model_full() || preempt_model_rt();
}
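/*
 * Illustrative usage, not part of this patch: with CONFIG_PREEMPT_DYNAMIC
 * the compile-time CONFIG_PREEMPTION setting no longer tells the whole
 * story, so a (hypothetical) caller would query the helpers above at run
 * time, for example to size latency-sensitive batches of work.
 */
static inline unsigned int hypothetical_batch_size(void)
{
        if (preempt_model_preemptible())
                return 64;      /* the scheduler can interrupt us anywhere */
        return 8;               /* cooperative models: keep latency bounded */
}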
/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
@@ -47,11 +47,9 @@ struct srcu_data {
 */
struct srcu_node {
        spinlock_t __private lock;
        unsigned long srcu_have_cbs[4];         /* GP seq for children */
                                                /* having CBs, but only */
                                                /* is > ->srcu_gq_seq. */
        unsigned long srcu_data_have_cbs[4];    /* Which srcu_data structs */
                                                /* have CBs for given GP? */
        unsigned long srcu_have_cbs[4];         /* GP seq for children having CBs, but only */
                                                /* if greater than ->srcu_gq_seq. */
        unsigned long srcu_data_have_cbs[4];    /* Which srcu_data structs have CBs for given GP? */
        unsigned long srcu_gp_seq_needed_exp;   /* Furthest future exp GP. */
        struct srcu_node *srcu_parent;          /* Next up in tree. */
        int grplo;                              /* Least CPU for node. */
@@ -62,18 +60,24 @@ struct srcu_node {
 * Per-SRCU-domain structure, similar in function to rcu_state.
 */
struct srcu_struct {
        struct srcu_node node[NUM_RCU_NODES];   /* Combining tree. */
        struct srcu_node *node;                 /* Combining tree. */
        struct srcu_node *level[RCU_NUM_LVLS + 1];
                                                /* First node at each level. */
        int srcu_size_state;                    /* Small-to-big transition state. */
        struct mutex srcu_cb_mutex;             /* Serialize CB preparation. */
        spinlock_t __private lock;              /* Protect counters */
        spinlock_t __private lock;              /* Protect counters and size state. */
        struct mutex srcu_gp_mutex;             /* Serialize GP work. */
        unsigned int srcu_idx;                  /* Current rdr array element. */
        unsigned long srcu_gp_seq;              /* Grace-period seq #. */
        unsigned long srcu_gp_seq_needed;       /* Latest gp_seq needed. */
        unsigned long srcu_gp_seq_needed_exp;   /* Furthest future exp GP. */
        unsigned long srcu_gp_start;            /* Last GP start timestamp (jiffies) */
        unsigned long srcu_last_gp_end;         /* Last GP end timestamp (ns) */
        unsigned long srcu_size_jiffies;        /* Current contention-measurement interval. */
        unsigned long srcu_n_lock_retries;      /* Contention events in current interval. */
        unsigned long srcu_n_exp_nodelay;       /* # expedited no-delays in current GP phase. */
        struct srcu_data __percpu *sda;         /* Per-CPU srcu_data array. */
        bool sda_is_static;                     /* May ->sda be passed to free_percpu()? */
        unsigned long srcu_barrier_seq;         /* srcu_barrier seq #. */
        struct mutex srcu_barrier_mutex;        /* Serialize barrier ops. */
        struct completion srcu_barrier_completion;
@@ -81,10 +85,23 @@ struct srcu_struct {
        atomic_t srcu_barrier_cpu_cnt;          /* # CPUs not yet posting a */
                                                /* callback for the barrier */
                                                /* operation. */
        unsigned long reschedule_jiffies;
        unsigned long reschedule_count;
        struct delayed_work work;
        struct lockdep_map dep_map;
};

/* Values for size state variable (->srcu_size_state). */
#define SRCU_SIZE_SMALL 0
#define SRCU_SIZE_ALLOC 1
#define SRCU_SIZE_WAIT_BARRIER 2
#define SRCU_SIZE_WAIT_CALL 3
#define SRCU_SIZE_WAIT_CBS1 4
#define SRCU_SIZE_WAIT_CBS2 5
#define SRCU_SIZE_WAIT_CBS3 6
#define SRCU_SIZE_WAIT_CBS4 7
#define SRCU_SIZE_BIG 8
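/*
 * Orientation note, not part of this patch: taken together with the
 * srcu_gp_end() changes later in this diff, the values above form a
 * progression that advances one step per grace period once a transition
 * has been initiated: SMALL -> ALLOC (allocate the srcu_node array) ->
 * WAIT_BARRIER -> WAIT_CALL -> WAIT_CBS1..CBS4 -> BIG, after which the
 * srcu_struct uses its combining tree unconditionally.
 */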
/* Values for state variable (bottom bits of ->srcu_gp_seq). */
#define SRCU_STATE_IDLE 0
#define SRCU_STATE_SCAN1 1
@@ -121,6 +138,7 @@ struct srcu_struct {
#ifdef MODULE
# define __DEFINE_SRCU(name, is_static) \
        is_static struct srcu_struct name; \
        extern struct srcu_struct * const __srcu_struct_##name; \
        struct srcu_struct * const __srcu_struct_##name \
                __section("___srcu_struct_ptrs") = &name
#else
@@ -118,7 +118,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
        _torture_stop_kthread("Stopping " #n " task", &(tp))

#ifdef CONFIG_PREEMPTION
#define torture_preempt_schedule() preempt_schedule()
#define torture_preempt_schedule() __preempt_schedule()
#else
#define torture_preempt_schedule() do { } while (0)
#endif
@@ -27,6 +27,7 @@ config BPF_SYSCALL
        bool "Enable bpf() system call"
        select BPF
        select IRQ_WORK
        select TASKS_RCU if PREEMPTION
        select TASKS_TRACE_RCU
        select BINARY_PRINTF
        select NET_SOCK_MSG if NET
@@ -77,31 +77,56 @@ config TASKS_RCU_GENERIC
          This option enables generic infrastructure code supporting
          task-based RCU implementations. Not for manual selection.

config TASKS_RCU
        def_bool PREEMPTION
config FORCE_TASKS_RCU
        bool "Force selection of TASKS_RCU"
        depends on RCU_EXPERT
        select TASKS_RCU
        default n
        help
          This option enables a task-based RCU implementation that uses
          only voluntary context switch (not preemption!), idle, and
          user-mode execution as quiescent states. Not for manual selection.
          This option force-enables a task-based RCU implementation
          that uses only voluntary context switch (not preemption!),
          idle, and user-mode execution as quiescent states. Not for
          manual selection in most cases.

config TASKS_RCU
        bool
        default n
        select IRQ_WORK

config FORCE_TASKS_RUDE_RCU
        bool "Force selection of Tasks Rude RCU"
        depends on RCU_EXPERT
        select TASKS_RUDE_RCU
        default n
        help
          This option force-enables a task-based RCU implementation
          that uses only context switch (including preemption) and
          user-mode execution as quiescent states. It forces IPIs and
          context switches on all online CPUs, including idle ones,
          so use with caution. Not for manual selection in most cases.

config TASKS_RUDE_RCU
        def_bool 0
        help
          This option enables a task-based RCU implementation that uses
          only context switch (including preemption) and user-mode
          execution as quiescent states. It forces IPIs and context
          switches on all online CPUs, including idle ones, so use
          with caution.

config TASKS_TRACE_RCU
        def_bool 0
        bool
        default n
        select IRQ_WORK

config FORCE_TASKS_TRACE_RCU
        bool "Force selection of Tasks Trace RCU"
        depends on RCU_EXPERT
        select TASKS_TRACE_RCU
        default n
        help
          This option enables a task-based RCU implementation that uses
          explicit rcu_read_lock_trace() read-side markers, and allows
          these readers to appear in the idle loop as well as on the CPU
          hotplug code paths. It can force IPIs on online CPUs, including
          idle ones, so use with caution.
          these readers to appear in the idle loop as well as on the
          CPU hotplug code paths. It can force IPIs on online CPUs,
          including idle ones, so use with caution. Not for manual
          selection in most cases.

config TASKS_TRACE_RCU
        bool
        default n
        select IRQ_WORK

config RCU_STALL_COMMON
        def_bool TREE_RCU
@@ -225,7 +250,7 @@ config RCU_NOCB_CPU

config TASKS_TRACE_RCU_READ_MB
        bool "Tasks Trace RCU readers use memory barriers in user and idle"
        depends on RCU_EXPERT
        depends on RCU_EXPERT && TASKS_TRACE_RCU
        default PREEMPT_RT || NR_CPUS < 8
        help
          Use this option to further reduce the number of IPIs sent
@@ -28,9 +28,6 @@ config RCU_SCALE_TEST
        depends on DEBUG_KERNEL
        select TORTURE_TEST
        select SRCU
        select TASKS_RCU
        select TASKS_RUDE_RCU
        select TASKS_TRACE_RCU
        default n
        help
          This option provides a kernel module that runs performance
@@ -47,9 +44,6 @@ config RCU_TORTURE_TEST
        depends on DEBUG_KERNEL
        select TORTURE_TEST
        select SRCU
        select TASKS_RCU
        select TASKS_RUDE_RCU
        select TASKS_TRACE_RCU
        default n
        help
          This option provides a kernel module that runs torture tests
@@ -66,9 +60,6 @@ config RCU_REF_SCALE_TEST
        depends on DEBUG_KERNEL
        select TORTURE_TEST
        select SRCU
        select TASKS_RCU
        select TASKS_RUDE_RCU
        select TASKS_TRACE_RCU
        default n
        help
          This option provides a kernel module that runs performance tests
@@ -523,6 +523,8 @@ static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { ret
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
@@ -535,13 +537,13 @@ void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif
@@ -505,10 +505,10 @@ void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
        WRITE_ONCE(rsclp->tails[j], rsclp->tails[RCU_DONE_TAIL]);

        /*
         * Callbacks moved, so clean up the misordered ->tails[] pointers
         * that now point into the middle of the list of ready-to-invoke
         * callbacks. The overall effect is to copy down the later pointers
         * into the gap that was created by the now-ready segments.
         * Callbacks moved, so there might be an empty RCU_WAIT_TAIL
         * and a non-empty RCU_NEXT_READY_TAIL. If so, copy the
         * RCU_NEXT_READY_TAIL segment to fill the RCU_WAIT_TAIL gap
         * created by the now-ready-to-invoke segments.
         */
        for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
                if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])
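/*
 * Background, not part of this patch: an rcu_segcblist is a single
 * callback list partitioned by ->tails[] into four segments --
 * RCU_DONE_TAIL (ready to invoke), RCU_WAIT_TAIL (waiting for the
 * current grace period), RCU_NEXT_READY_TAIL (waiting for the next
 * grace period), and RCU_NEXT_TAIL (not yet assigned a grace period).
 * The loop above is what restores those boundaries after callbacks
 * become ready to invoke.
 */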
@ -268,6 +268,8 @@ static struct rcu_scale_ops srcud_ops = {
|
||||
.name = "srcud"
|
||||
};
|
||||
|
||||
#ifdef CONFIG_TASKS_RCU
|
||||
|
||||
/*
|
||||
* Definitions for RCU-tasks scalability testing.
|
||||
*/
|
||||
@ -295,6 +297,16 @@ static struct rcu_scale_ops tasks_ops = {
|
||||
.name = "tasks"
|
||||
};
|
||||
|
||||
#define TASKS_OPS &tasks_ops,
|
||||
|
||||
#else // #ifdef CONFIG_TASKS_RCU
|
||||
|
||||
#define TASKS_OPS
|
||||
|
||||
#endif // #else // #ifdef CONFIG_TASKS_RCU
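/*
 * Illustrative sketch, not part of this patch, of the pattern used above
 * and repeated for the other flavors: when the flavor is configured, the
 * macro expands to "&ops," (note the trailing comma) so the entry lands in
 * the ops[] array; otherwise it expands to nothing and the array simply
 * shrinks, avoiding run-time NULL checks. Names below are hypothetical.
 */
#ifdef CONFIG_HYPOTHETICAL_FLAVOR
static struct rcu_scale_ops hypothetical_ops;
#define HYPOTHETICAL_OPS &hypothetical_ops,
#else
#define HYPOTHETICAL_OPS
#endif

static struct rcu_scale_ops *example_ops[] = {
        &rcu_ops, HYPOTHETICAL_OPS &srcu_ops,   /* middle entry present only when configured */
};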
|
||||
|
||||
#ifdef CONFIG_TASKS_TRACE_RCU
|
||||
|
||||
/*
|
||||
* Definitions for RCU-tasks-trace scalability testing.
|
||||
*/
|
||||
@ -324,6 +336,14 @@ static struct rcu_scale_ops tasks_tracing_ops = {
|
||||
.name = "tasks-tracing"
|
||||
};
|
||||
|
||||
#define TASKS_TRACING_OPS &tasks_tracing_ops,
|
||||
|
||||
#else // #ifdef CONFIG_TASKS_TRACE_RCU
|
||||
|
||||
#define TASKS_TRACING_OPS
|
||||
|
||||
#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU
|
||||
|
||||
static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
|
||||
{
|
||||
if (!cur_ops->gp_diff)
|
||||
@ -797,7 +817,7 @@ rcu_scale_init(void)
|
||||
long i;
|
||||
int firsterr = 0;
|
||||
static struct rcu_scale_ops *scale_ops[] = {
|
||||
&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops, &tasks_tracing_ops
|
||||
&rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_TRACING_OPS
|
||||
};
|
||||
|
||||
if (!torture_init_begin(scale_type, verbose))
|
||||
|
@ -737,6 +737,50 @@ static struct rcu_torture_ops busted_srcud_ops = {
|
||||
.name = "busted_srcud"
|
||||
};
|
||||
|
||||
/*
|
||||
* Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
|
||||
* This implementation does not necessarily work well with CPU hotplug.
|
||||
*/
|
||||
|
||||
static void synchronize_rcu_trivial(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
|
||||
WARN_ON_ONCE(raw_smp_processor_id() != cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
|
||||
{
|
||||
preempt_disable();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
|
||||
{
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
static struct rcu_torture_ops trivial_ops = {
|
||||
.ttype = RCU_TRIVIAL_FLAVOR,
|
||||
.init = rcu_sync_torture_init,
|
||||
.readlock = rcu_torture_read_lock_trivial,
|
||||
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
|
||||
.readunlock = rcu_torture_read_unlock_trivial,
|
||||
.readlock_held = torture_readlock_not_held,
|
||||
.get_gp_seq = rcu_no_completed,
|
||||
.sync = synchronize_rcu_trivial,
|
||||
.exp_sync = synchronize_rcu_trivial,
|
||||
.fqs = NULL,
|
||||
.stats = NULL,
|
||||
.irq_capable = 1,
|
||||
.name = "trivial"
|
||||
};
|
||||
|
||||
#ifdef CONFIG_TASKS_RCU
|
||||
|
||||
/*
|
||||
* Definitions for RCU-tasks torture testing.
|
||||
*/
|
||||
@ -780,47 +824,16 @@ static struct rcu_torture_ops tasks_ops = {
|
||||
.name = "tasks"
|
||||
};
|
||||
|
||||
/*
|
||||
* Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
|
||||
* This implementation does not necessarily work well with CPU hotplug.
|
||||
*/
|
||||
#define TASKS_OPS &tasks_ops,
|
||||
|
||||
static void synchronize_rcu_trivial(void)
|
||||
{
|
||||
int cpu;
|
||||
#else // #ifdef CONFIG_TASKS_RCU
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
|
||||
WARN_ON_ONCE(raw_smp_processor_id() != cpu);
|
||||
}
|
||||
}
|
||||
#define TASKS_OPS
|
||||
|
||||
static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
|
||||
{
|
||||
preempt_disable();
|
||||
return 0;
|
||||
}
|
||||
#endif // #else #ifdef CONFIG_TASKS_RCU
|
||||
|
||||
static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
|
||||
{
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
static struct rcu_torture_ops trivial_ops = {
|
||||
.ttype = RCU_TRIVIAL_FLAVOR,
|
||||
.init = rcu_sync_torture_init,
|
||||
.readlock = rcu_torture_read_lock_trivial,
|
||||
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
|
||||
.readunlock = rcu_torture_read_unlock_trivial,
|
||||
.readlock_held = torture_readlock_not_held,
|
||||
.get_gp_seq = rcu_no_completed,
|
||||
.sync = synchronize_rcu_trivial,
|
||||
.exp_sync = synchronize_rcu_trivial,
|
||||
.fqs = NULL,
|
||||
.stats = NULL,
|
||||
.irq_capable = 1,
|
||||
.name = "trivial"
|
||||
};
|
||||
#ifdef CONFIG_TASKS_RUDE_RCU
|
||||
|
||||
/*
|
||||
* Definitions for rude RCU-tasks torture testing.
|
||||
@ -851,6 +864,17 @@ static struct rcu_torture_ops tasks_rude_ops = {
|
||||
.name = "tasks-rude"
|
||||
};
|
||||
|
||||
#define TASKS_RUDE_OPS &tasks_rude_ops,
|
||||
|
||||
#else // #ifdef CONFIG_TASKS_RUDE_RCU
|
||||
|
||||
#define TASKS_RUDE_OPS
|
||||
|
||||
#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU
|
||||
|
||||
|
||||
#ifdef CONFIG_TASKS_TRACE_RCU
|
||||
|
||||
/*
|
||||
* Definitions for tracing RCU-tasks torture testing.
|
||||
*/
|
||||
@ -893,6 +917,15 @@ static struct rcu_torture_ops tasks_tracing_ops = {
|
||||
.name = "tasks-tracing"
|
||||
};
|
||||
|
||||
#define TASKS_TRACING_OPS &tasks_tracing_ops,
|
||||
|
||||
#else // #ifdef CONFIG_TASKS_TRACE_RCU
|
||||
|
||||
#define TASKS_TRACING_OPS
|
||||
|
||||
#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU
|
||||
|
||||
|
||||
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
|
||||
{
|
||||
if (!cur_ops->gp_diff)
|
||||
@ -1178,7 +1211,7 @@ rcu_torture_writer(void *arg)
|
||||
" GP expediting controlled from boot/sysfs for %s.\n",
|
||||
torture_type, cur_ops->name);
|
||||
if (WARN_ONCE(nsynctypes == 0,
|
||||
"rcu_torture_writer: No update-side primitives.\n")) {
|
||||
"%s: No update-side primitives.\n", __func__)) {
|
||||
/*
|
||||
* No updates primitives, so don't try updating.
|
||||
* The resulting test won't be testing much, hence the
|
||||
@ -1186,6 +1219,7 @@ rcu_torture_writer(void *arg)
|
||||
*/
|
||||
rcu_torture_writer_state = RTWS_STOPPING;
|
||||
torture_kthread_stopping("rcu_torture_writer");
|
||||
return 0;
|
||||
}
|
||||
|
||||
do {
|
||||
@ -1322,6 +1356,17 @@ rcu_torture_fakewriter(void *arg)
|
||||
VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
|
||||
set_user_nice(current, MAX_NICE);
|
||||
|
||||
if (WARN_ONCE(nsynctypes == 0,
|
||||
"%s: No update-side primitives.\n", __func__)) {
|
||||
/*
|
||||
* No updates primitives, so don't try updating.
|
||||
* The resulting test won't be testing much, hence the
|
||||
* above WARN_ONCE().
|
||||
*/
|
||||
torture_kthread_stopping("rcu_torture_fakewriter");
|
||||
return 0;
|
||||
}
|
||||
|
||||
do {
|
||||
torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
|
||||
if (cur_ops->cb_barrier != NULL &&
|
||||
@ -2916,10 +2961,12 @@ rcu_torture_cleanup(void)
|
||||
pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
|
||||
cur_ops->cb_barrier();
|
||||
}
|
||||
rcu_gp_slow_unregister(NULL);
|
||||
return;
|
||||
}
|
||||
if (!cur_ops) {
|
||||
torture_cleanup_end();
|
||||
rcu_gp_slow_unregister(NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -3016,6 +3063,7 @@ rcu_torture_cleanup(void)
|
||||
else
|
||||
rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
|
||||
torture_cleanup_end();
|
||||
rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
|
||||
@ -3096,9 +3144,9 @@ rcu_torture_init(void)
|
||||
int flags = 0;
|
||||
unsigned long gp_seq = 0;
|
||||
static struct rcu_torture_ops *torture_ops[] = {
|
||||
&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
|
||||
&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
|
||||
&tasks_tracing_ops, &trivial_ops,
|
||||
&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
|
||||
TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
|
||||
&trivial_ops,
|
||||
};
|
||||
|
||||
if (!torture_init_begin(torture_type, verbose))
|
||||
@ -3320,6 +3368,7 @@ rcu_torture_init(void)
|
||||
if (object_debug)
|
||||
rcu_test_debug_objects();
|
||||
torture_init_end();
|
||||
rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
|
||||
return 0;
|
||||
|
||||
unwind:
|
||||
|
@ -207,6 +207,8 @@ static struct ref_scale_ops srcu_ops = {
|
||||
.name = "srcu"
|
||||
};
|
||||
|
||||
#ifdef CONFIG_TASKS_RCU
|
||||
|
||||
// Definitions for RCU Tasks ref scale testing: Empty read markers.
|
||||
// These definitions also work for RCU Rude readers.
|
||||
static void rcu_tasks_ref_scale_read_section(const int nloops)
|
||||
@ -232,6 +234,16 @@ static struct ref_scale_ops rcu_tasks_ops = {
|
||||
.name = "rcu-tasks"
|
||||
};
|
||||
|
||||
#define RCU_TASKS_OPS &rcu_tasks_ops,
|
||||
|
||||
#else // #ifdef CONFIG_TASKS_RCU
|
||||
|
||||
#define RCU_TASKS_OPS
|
||||
|
||||
#endif // #else // #ifdef CONFIG_TASKS_RCU
|
||||
|
||||
#ifdef CONFIG_TASKS_TRACE_RCU
|
||||
|
||||
// Definitions for RCU Tasks Trace ref scale testing.
|
||||
static void rcu_trace_ref_scale_read_section(const int nloops)
|
||||
{
|
||||
@ -261,6 +273,14 @@ static struct ref_scale_ops rcu_trace_ops = {
|
||||
.name = "rcu-trace"
|
||||
};
|
||||
|
||||
#define RCU_TRACE_OPS &rcu_trace_ops,
|
||||
|
||||
#else // #ifdef CONFIG_TASKS_TRACE_RCU
|
||||
|
||||
#define RCU_TRACE_OPS
|
||||
|
||||
#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU
|
||||
|
||||
// Definitions for reference count
|
||||
static atomic_t refcnt;
|
||||
|
||||
@ -790,7 +810,7 @@ ref_scale_init(void)
|
||||
long i;
|
||||
int firsterr = 0;
|
||||
static struct ref_scale_ops *scale_ops[] = {
|
||||
&rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops, &refcnt_ops, &rwlock_ops,
|
||||
&rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops,
|
||||
&rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops,
|
||||
};
|
||||
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include <linux/smp.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/srcu.h>
|
||||
|
||||
#include "rcu.h"
|
||||
@ -38,6 +39,35 @@ module_param(exp_holdoff, ulong, 0444);
|
||||
static ulong counter_wrap_check = (ULONG_MAX >> 2);
|
||||
module_param(counter_wrap_check, ulong, 0444);
|
||||
|
||||
/*
 * Control conversion to SRCU_SIZE_BIG:
 * 0: Don't convert at all.
 * 1: Convert at init_srcu_struct() time.
 * 2: Convert when rcutorture invokes srcu_torture_stats_print().
 * 3: Decide at boot time based on system shape (default).
 * 0x1x: Convert when excessive contention encountered.
 */
#define SRCU_SIZING_NONE 0
#define SRCU_SIZING_INIT 1
#define SRCU_SIZING_TORTURE 2
#define SRCU_SIZING_AUTO 3
#define SRCU_SIZING_CONTEND 0x10
#define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
#define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
#define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
#define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
#define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
static int convert_to_big = SRCU_SIZING_AUTO;
module_param(convert_to_big, int, 0444);

/* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
static int big_cpu_lim __read_mostly = 128;
module_param(big_cpu_lim, int, 0444);

/* Contention events per jiffy to initiate transition to big. */
static int small_contention_lim __read_mostly = 100;
module_param(small_contention_lim, int, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;
@ -48,39 +78,90 @@ static void process_srcu(struct work_struct *work);
|
||||
static void srcu_delay_timer(struct timer_list *t);
|
||||
|
||||
/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
|
||||
#define spin_lock_rcu_node(p) \
|
||||
do { \
|
||||
spin_lock(&ACCESS_PRIVATE(p, lock)); \
|
||||
smp_mb__after_unlock_lock(); \
|
||||
#define spin_lock_rcu_node(p) \
|
||||
do { \
|
||||
spin_lock(&ACCESS_PRIVATE(p, lock)); \
|
||||
smp_mb__after_unlock_lock(); \
|
||||
} while (0)
|
||||
|
||||
#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
|
||||
|
||||
#define spin_lock_irq_rcu_node(p) \
|
||||
do { \
|
||||
spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
|
||||
smp_mb__after_unlock_lock(); \
|
||||
#define spin_lock_irq_rcu_node(p) \
|
||||
do { \
|
||||
spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
|
||||
smp_mb__after_unlock_lock(); \
|
||||
} while (0)
|
||||
|
||||
#define spin_unlock_irq_rcu_node(p) \
|
||||
#define spin_unlock_irq_rcu_node(p) \
|
||||
spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
|
||||
|
||||
#define spin_lock_irqsave_rcu_node(p, flags) \
|
||||
do { \
|
||||
spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
|
||||
smp_mb__after_unlock_lock(); \
|
||||
#define spin_lock_irqsave_rcu_node(p, flags) \
|
||||
do { \
|
||||
spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
|
||||
smp_mb__after_unlock_lock(); \
|
||||
} while (0)
|
||||
|
||||
#define spin_unlock_irqrestore_rcu_node(p, flags) \
|
||||
spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) \
|
||||
#define spin_trylock_irqsave_rcu_node(p, flags) \
|
||||
({ \
|
||||
bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
|
||||
\
|
||||
if (___locked) \
|
||||
smp_mb__after_unlock_lock(); \
|
||||
___locked; \
|
||||
})
|
||||
|
||||
#define spin_unlock_irqrestore_rcu_node(p, flags) \
|
||||
spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) \
|
||||
|
||||
/*
|
||||
* Initialize SRCU combining tree. Note that statically allocated
|
||||
* Initialize SRCU per-CPU data. Note that statically allocated
|
||||
* srcu_struct structures might already have srcu_read_lock() and
|
||||
* srcu_read_unlock() running against them. So if the is_static parameter
|
||||
* is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
|
||||
*/
|
||||
static void init_srcu_struct_nodes(struct srcu_struct *ssp)
|
||||
static void init_srcu_struct_data(struct srcu_struct *ssp)
|
||||
{
|
||||
int cpu;
|
||||
struct srcu_data *sdp;
|
||||
|
||||
/*
|
||||
* Initialize the per-CPU srcu_data array, which feeds into the
|
||||
* leaves of the srcu_node tree.
|
||||
*/
|
||||
WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
|
||||
ARRAY_SIZE(sdp->srcu_unlock_count));
|
||||
for_each_possible_cpu(cpu) {
|
||||
sdp = per_cpu_ptr(ssp->sda, cpu);
|
||||
spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
|
||||
rcu_segcblist_init(&sdp->srcu_cblist);
|
||||
sdp->srcu_cblist_invoking = false;
|
||||
sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
|
||||
sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
|
||||
sdp->mynode = NULL;
|
||||
sdp->cpu = cpu;
|
||||
INIT_WORK(&sdp->work, srcu_invoke_callbacks);
|
||||
timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
|
||||
sdp->ssp = ssp;
|
||||
}
|
||||
}
|
||||
|
||||
/* Invalid seq state, used during snp node initialization */
|
||||
#define SRCU_SNP_INIT_SEQ 0x2
|
||||
|
||||
/*
|
||||
* Check whether sequence number corresponding to snp node,
|
||||
* is invalid.
|
||||
*/
|
||||
static inline bool srcu_invl_snp_seq(unsigned long s)
|
||||
{
|
||||
return rcu_seq_state(s) == SRCU_SNP_INIT_SEQ;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocated and initialize SRCU combining tree. Returns @true if
|
||||
* allocation succeeded and @false otherwise.
|
||||
*/
|
||||
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
|
||||
{
|
||||
int cpu;
|
||||
int i;
|
||||
@ -92,6 +173,9 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp)
|
||||
|
||||
/* Initialize geometry if it has not already been initialized. */
|
||||
rcu_init_geometry();
|
||||
ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), gfp_flags);
|
||||
if (!ssp->node)
|
||||
return false;
|
||||
|
||||
/* Work out the overall tree geometry. */
|
||||
ssp->level[0] = &ssp->node[0];
|
||||
@ -105,10 +189,10 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp)
|
||||
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
|
||||
ARRAY_SIZE(snp->srcu_data_have_cbs));
|
||||
for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
|
||||
snp->srcu_have_cbs[i] = 0;
|
||||
snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
|
||||
snp->srcu_data_have_cbs[i] = 0;
|
||||
}
|
||||
snp->srcu_gp_seq_needed_exp = 0;
|
||||
snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
|
||||
snp->grplo = -1;
|
||||
snp->grphi = -1;
|
||||
if (snp == &ssp->node[0]) {
|
||||
@ -129,39 +213,31 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp)
|
||||
* Initialize the per-CPU srcu_data array, which feeds into the
|
||||
* leaves of the srcu_node tree.
|
||||
*/
|
||||
WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
|
||||
ARRAY_SIZE(sdp->srcu_unlock_count));
|
||||
level = rcu_num_lvls - 1;
|
||||
snp_first = ssp->level[level];
|
||||
for_each_possible_cpu(cpu) {
|
||||
sdp = per_cpu_ptr(ssp->sda, cpu);
|
||||
spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
|
||||
rcu_segcblist_init(&sdp->srcu_cblist);
|
||||
sdp->srcu_cblist_invoking = false;
|
||||
sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
|
||||
sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
|
||||
sdp->mynode = &snp_first[cpu / levelspread[level]];
|
||||
for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
|
||||
if (snp->grplo < 0)
|
||||
snp->grplo = cpu;
|
||||
snp->grphi = cpu;
|
||||
}
|
||||
sdp->cpu = cpu;
|
||||
INIT_WORK(&sdp->work, srcu_invoke_callbacks);
|
||||
timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
|
||||
sdp->ssp = ssp;
|
||||
sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
|
||||
}
|
||||
smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize non-compile-time initialized fields, including the
|
||||
* associated srcu_node and srcu_data structures. The is_static
|
||||
* parameter is passed through to init_srcu_struct_nodes(), and
|
||||
* also tells us that ->sda has already been wired up to srcu_data.
|
||||
* associated srcu_node and srcu_data structures. The is_static parameter
|
||||
* tells us that ->sda has already been wired up to srcu_data.
|
||||
*/
|
||||
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
|
||||
{
|
||||
ssp->srcu_size_state = SRCU_SIZE_SMALL;
|
||||
ssp->node = NULL;
|
||||
mutex_init(&ssp->srcu_cb_mutex);
|
||||
mutex_init(&ssp->srcu_gp_mutex);
|
||||
ssp->srcu_idx = 0;
|
||||
@ -170,13 +246,25 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
|
||||
mutex_init(&ssp->srcu_barrier_mutex);
|
||||
atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
|
||||
INIT_DELAYED_WORK(&ssp->work, process_srcu);
|
||||
ssp->sda_is_static = is_static;
|
||||
if (!is_static)
|
||||
ssp->sda = alloc_percpu(struct srcu_data);
|
||||
if (!ssp->sda)
|
||||
return -ENOMEM;
|
||||
init_srcu_struct_nodes(ssp);
|
||||
init_srcu_struct_data(ssp);
|
||||
ssp->srcu_gp_seq_needed_exp = 0;
|
||||
ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
|
||||
if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
|
||||
if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
|
||||
if (!ssp->sda_is_static) {
|
||||
free_percpu(ssp->sda);
|
||||
ssp->sda = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
} else {
|
||||
WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_BIG);
|
||||
}
|
||||
}
|
||||
smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
|
||||
return 0;
|
||||
}
|
||||
@ -213,6 +301,86 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
|
||||
|
||||
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
|
||||
|
||||
/*
|
||||
* Initiate a transition to SRCU_SIZE_BIG with lock held.
|
||||
*/
|
||||
static void __srcu_transition_to_big(struct srcu_struct *ssp)
|
||||
{
|
||||
lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
|
||||
smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_ALLOC);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initiate an idempotent transition to SRCU_SIZE_BIG.
|
||||
*/
|
||||
static void srcu_transition_to_big(struct srcu_struct *ssp)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/* Double-checked locking on ->srcu_size-state. */
|
||||
if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL)
|
||||
return;
|
||||
spin_lock_irqsave_rcu_node(ssp, flags);
|
||||
if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL) {
|
||||
spin_unlock_irqrestore_rcu_node(ssp, flags);
|
||||
return;
|
||||
}
|
||||
__srcu_transition_to_big(ssp);
|
||||
spin_unlock_irqrestore_rcu_node(ssp, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check to see if the just-encountered contention event justifies
|
||||
* a transition to SRCU_SIZE_BIG.
|
||||
*/
|
||||
static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
|
||||
{
|
||||
unsigned long j;
|
||||
|
||||
if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_size_state)
|
||||
return;
|
||||
j = jiffies;
|
||||
if (ssp->srcu_size_jiffies != j) {
|
||||
ssp->srcu_size_jiffies = j;
|
||||
ssp->srcu_n_lock_retries = 0;
|
||||
}
|
||||
if (++ssp->srcu_n_lock_retries <= small_contention_lim)
|
||||
return;
|
||||
__srcu_transition_to_big(ssp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Acquire the specified srcu_data structure's ->lock, but check for
|
||||
* excessive contention, which results in initiation of a transition
|
||||
* to SRCU_SIZE_BIG. But only if the srcutree.convert_to_big module
|
||||
* parameter permits this.
|
||||
*/
|
||||
static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
|
||||
{
|
||||
struct srcu_struct *ssp = sdp->ssp;
|
||||
|
||||
if (spin_trylock_irqsave_rcu_node(sdp, *flags))
|
||||
return;
|
||||
spin_lock_irqsave_rcu_node(ssp, *flags);
|
||||
spin_lock_irqsave_check_contention(ssp);
|
||||
spin_unlock_irqrestore_rcu_node(ssp, *flags);
|
||||
spin_lock_irqsave_rcu_node(sdp, *flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Acquire the specified srcu_struct structure's ->lock, but check for
|
||||
* excessive contention, which results in initiation of a transition
|
||||
* to SRCU_SIZE_BIG. But only if the srcutree.convert_to_big module
|
||||
* parameter permits this.
|
||||
*/
|
||||
static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
|
||||
{
|
||||
if (spin_trylock_irqsave_rcu_node(ssp, *flags))
|
||||
return;
|
||||
spin_lock_irqsave_rcu_node(ssp, *flags);
|
||||
spin_lock_irqsave_check_contention(ssp);
|
||||
}
|
||||
|
||||
/*
|
||||
* First-use initialization of statically allocated srcu_struct
|
||||
* structure. Wiring up the combining tree is more than can be
|
||||
@ -343,7 +511,10 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
|
||||
return sum;
|
||||
}
|
||||
|
||||
#define SRCU_INTERVAL 1
|
||||
#define SRCU_INTERVAL 1 // Base delay if no expedited GPs pending.
|
||||
#define SRCU_MAX_INTERVAL 10 // Maximum incremental delay from slow readers.
|
||||
#define SRCU_MAX_NODELAY_PHASE 1 // Maximum per-GP-phase consecutive no-delay instances.
|
||||
#define SRCU_MAX_NODELAY 100 // Maximum consecutive no-delay instances.
|
||||
|
||||
/*
|
||||
* Return grace-period delay, zero if there are expedited grace
|
||||
@ -351,10 +522,18 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
|
||||
*/
|
||||
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
|
||||
{
|
||||
if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
|
||||
READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
|
||||
return 0;
|
||||
return SRCU_INTERVAL;
|
||||
unsigned long jbase = SRCU_INTERVAL;
|
||||
|
||||
if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
|
||||
jbase = 0;
|
||||
if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)))
|
||||
jbase += jiffies - READ_ONCE(ssp->srcu_gp_start);
|
||||
if (!jbase) {
|
||||
WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
|
||||
if (READ_ONCE(ssp->srcu_n_exp_nodelay) > SRCU_MAX_NODELAY_PHASE)
|
||||
jbase = 1;
|
||||
}
|
||||
return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
|
||||
}
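/*
 * Worked example, not part of this patch: with no expedited grace period
 * pending, jbase starts at SRCU_INTERVAL (1); if a grace period has been
 * running for, say, 5 jiffies, jbase becomes 6, comfortably under the
 * SRCU_MAX_INTERVAL (10) clamp. With an expedited request pending, jbase
 * starts at 0, and only after more than SRCU_MAX_NODELAY_PHASE consecutive
 * no-delay passes in the current GP phase is a single jiffy of delay
 * reintroduced.
 */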
|
||||
|
||||
/**
|
||||
@ -382,13 +561,20 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
|
||||
return; /* Forgot srcu_barrier(), so just leak it! */
|
||||
}
|
||||
if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
|
||||
WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
|
||||
WARN_ON(srcu_readers_active(ssp))) {
|
||||
pr_info("%s: Active srcu_struct %p state: %d\n",
|
||||
__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
|
||||
pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
|
||||
__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
|
||||
rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
|
||||
return; /* Caller forgot to stop doing call_srcu()? */
|
||||
}
|
||||
free_percpu(ssp->sda);
|
||||
ssp->sda = NULL;
|
||||
if (!ssp->sda_is_static) {
|
||||
free_percpu(ssp->sda);
|
||||
ssp->sda = NULL;
|
||||
}
|
||||
kfree(ssp->node);
|
||||
ssp->node = NULL;
|
||||
ssp->srcu_size_state = SRCU_SIZE_SMALL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
|
||||
|
||||
@ -434,9 +620,13 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
|
||||
*/
|
||||
static void srcu_gp_start(struct srcu_struct *ssp)
|
||||
{
|
||||
struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
|
||||
struct srcu_data *sdp;
|
||||
int state;
|
||||
|
||||
if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
|
||||
sdp = per_cpu_ptr(ssp->sda, 0);
|
||||
else
|
||||
sdp = this_cpu_ptr(ssp->sda);
|
||||
lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
|
||||
WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
|
||||
spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
|
||||
@ -445,6 +635,8 @@ static void srcu_gp_start(struct srcu_struct *ssp)
|
||||
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
|
||||
rcu_seq_snap(&ssp->srcu_gp_seq));
|
||||
spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
|
||||
WRITE_ONCE(ssp->srcu_gp_start, jiffies);
|
||||
WRITE_ONCE(ssp->srcu_n_exp_nodelay, 0);
|
||||
smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
|
||||
rcu_seq_start(&ssp->srcu_gp_seq);
|
||||
state = rcu_seq_state(ssp->srcu_gp_seq);
|
||||
@ -517,7 +709,9 @@ static void srcu_gp_end(struct srcu_struct *ssp)
|
||||
int idx;
|
||||
unsigned long mask;
|
||||
struct srcu_data *sdp;
|
||||
unsigned long sgsne;
|
||||
struct srcu_node *snp;
|
||||
int ss_state;
|
||||
|
||||
/* Prevent more than one additional grace period. */
|
||||
mutex_lock(&ssp->srcu_cb_mutex);
|
||||
@ -526,7 +720,7 @@ static void srcu_gp_end(struct srcu_struct *ssp)
|
||||
spin_lock_irq_rcu_node(ssp);
|
||||
idx = rcu_seq_state(ssp->srcu_gp_seq);
|
||||
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
|
||||
cbdelay = srcu_get_delay(ssp);
|
||||
cbdelay = !!srcu_get_delay(ssp);
|
||||
WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
|
||||
rcu_seq_end(&ssp->srcu_gp_seq);
|
||||
gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
|
||||
@ -537,38 +731,45 @@ static void srcu_gp_end(struct srcu_struct *ssp)
|
||||
/* A new grace period can start at this point. But only one. */
|
||||
|
||||
/* Initiate callback invocation as needed. */
|
||||
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
|
||||
srcu_for_each_node_breadth_first(ssp, snp) {
|
||||
spin_lock_irq_rcu_node(snp);
|
||||
cbs = false;
|
||||
last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
|
||||
if (last_lvl)
|
||||
cbs = snp->srcu_have_cbs[idx] == gpseq;
|
||||
snp->srcu_have_cbs[idx] = gpseq;
|
||||
rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
|
||||
if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
|
||||
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
|
||||
mask = snp->srcu_data_have_cbs[idx];
|
||||
snp->srcu_data_have_cbs[idx] = 0;
|
||||
spin_unlock_irq_rcu_node(snp);
|
||||
if (cbs)
|
||||
srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
|
||||
|
||||
/* Occasionally prevent srcu_data counter wrap. */
|
||||
if (!(gpseq & counter_wrap_check) && last_lvl)
|
||||
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
|
||||
sdp = per_cpu_ptr(ssp->sda, cpu);
|
||||
spin_lock_irqsave_rcu_node(sdp, flags);
|
||||
if (ULONG_CMP_GE(gpseq,
|
||||
sdp->srcu_gp_seq_needed + 100))
|
||||
sdp->srcu_gp_seq_needed = gpseq;
|
||||
if (ULONG_CMP_GE(gpseq,
|
||||
sdp->srcu_gp_seq_needed_exp + 100))
|
||||
sdp->srcu_gp_seq_needed_exp = gpseq;
|
||||
spin_unlock_irqrestore_rcu_node(sdp, flags);
|
||||
}
|
||||
ss_state = smp_load_acquire(&ssp->srcu_size_state);
|
||||
if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
|
||||
srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
|
||||
} else {
|
||||
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
|
||||
srcu_for_each_node_breadth_first(ssp, snp) {
|
||||
spin_lock_irq_rcu_node(snp);
|
||||
cbs = false;
|
||||
last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
|
||||
if (last_lvl)
|
||||
cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
|
||||
snp->srcu_have_cbs[idx] = gpseq;
|
||||
rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
|
||||
sgsne = snp->srcu_gp_seq_needed_exp;
|
||||
if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
|
||||
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
|
||||
if (ss_state < SRCU_SIZE_BIG)
|
||||
mask = ~0;
|
||||
else
|
||||
mask = snp->srcu_data_have_cbs[idx];
|
||||
snp->srcu_data_have_cbs[idx] = 0;
|
||||
spin_unlock_irq_rcu_node(snp);
|
||||
if (cbs)
|
||||
srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
|
||||
}
|
||||
}
|
||||
|
||||
/* Occasionally prevent srcu_data counter wrap. */
|
||||
if (!(gpseq & counter_wrap_check))
|
||||
for_each_possible_cpu(cpu) {
|
||||
sdp = per_cpu_ptr(ssp->sda, cpu);
|
||||
spin_lock_irqsave_rcu_node(sdp, flags);
|
||||
if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
|
||||
sdp->srcu_gp_seq_needed = gpseq;
|
||||
if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
|
||||
sdp->srcu_gp_seq_needed_exp = gpseq;
|
||||
spin_unlock_irqrestore_rcu_node(sdp, flags);
|
||||
}
|
||||
|
||||
/* Callback initiation done, allow grace periods after next. */
|
||||
mutex_unlock(&ssp->srcu_cb_mutex);
|
||||
|
||||
@ -583,6 +784,14 @@ static void srcu_gp_end(struct srcu_struct *ssp)
|
||||
} else {
|
||||
spin_unlock_irq_rcu_node(ssp);
|
||||
}
|
||||
|
||||
/* Transition to big if needed. */
|
||||
if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
|
||||
if (ss_state == SRCU_SIZE_ALLOC)
|
||||
init_srcu_struct_nodes(ssp, GFP_KERNEL);
|
||||
else
|
||||
smp_store_release(&ssp->srcu_size_state, ss_state + 1);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -596,20 +805,24 @@ static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp
|
||||
unsigned long s)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned long sgsne;
|
||||
|
||||
for (; snp != NULL; snp = snp->srcu_parent) {
|
||||
if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
|
||||
ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
|
||||
return;
|
||||
spin_lock_irqsave_rcu_node(snp, flags);
|
||||
if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
|
||||
if (snp)
|
||||
for (; snp != NULL; snp = snp->srcu_parent) {
|
||||
sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
|
||||
if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
|
||||
(!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
|
||||
return;
|
||||
spin_lock_irqsave_rcu_node(snp, flags);
|
||||
sgsne = snp->srcu_gp_seq_needed_exp;
|
||||
if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
|
||||
spin_unlock_irqrestore_rcu_node(snp, flags);
|
||||
return;
|
||||
}
|
||||
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
|
||||
spin_unlock_irqrestore_rcu_node(snp, flags);
|
||||
return;
|
||||
}
|
||||
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
|
||||
spin_unlock_irqrestore_rcu_node(snp, flags);
|
||||
}
|
||||
spin_lock_irqsave_rcu_node(ssp, flags);
|
||||
spin_lock_irqsave_ssp_contention(ssp, &flags);
|
||||
if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
|
||||
WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
|
||||
spin_unlock_irqrestore_rcu_node(ssp, flags);
|
||||
@ -630,39 +843,47 @@ static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
|
||||
{
|
||||
unsigned long flags;
|
||||
int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
|
||||
struct srcu_node *snp = sdp->mynode;
|
||||
unsigned long sgsne;
|
||||
struct srcu_node *snp;
|
||||
struct srcu_node *snp_leaf;
|
||||
unsigned long snp_seq;
|
||||
|
||||
/* Each pass through the loop does one level of the srcu_node tree. */
|
||||
for (; snp != NULL; snp = snp->srcu_parent) {
|
||||
if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
|
||||
return; /* GP already done and CBs recorded. */
|
||||
spin_lock_irqsave_rcu_node(snp, flags);
|
||||
if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
|
||||
/* Ensure that snp node tree is fully initialized before traversing it */
|
||||
if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
|
||||
snp_leaf = NULL;
|
||||
else
|
||||
snp_leaf = sdp->mynode;
|
||||
|
||||
if (snp_leaf)
|
||||
/* Each pass through the loop does one level of the srcu_node tree. */
|
||||
for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
|
||||
if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != snp_leaf)
|
||||
return; /* GP already done and CBs recorded. */
|
||||
spin_lock_irqsave_rcu_node(snp, flags);
|
||||
snp_seq = snp->srcu_have_cbs[idx];
|
||||
if (snp == sdp->mynode && snp_seq == s)
|
||||
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
|
||||
spin_unlock_irqrestore_rcu_node(snp, flags);
|
||||
if (snp == sdp->mynode && snp_seq != s) {
|
||||
srcu_schedule_cbs_sdp(sdp, do_norm
|
||||
? SRCU_INTERVAL
|
||||
: 0);
|
||||
if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
|
||||
if (snp == snp_leaf && snp_seq == s)
|
||||
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
|
||||
spin_unlock_irqrestore_rcu_node(snp, flags);
|
||||
if (snp == snp_leaf && snp_seq != s) {
|
||||
srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
|
||||
return;
|
||||
}
|
||||
if (!do_norm)
|
||||
srcu_funnel_exp_start(ssp, snp, s);
|
||||
return;
|
||||
}
|
||||
if (!do_norm)
|
||||
srcu_funnel_exp_start(ssp, snp, s);
|
||||
return;
|
||||
snp->srcu_have_cbs[idx] = s;
|
||||
if (snp == snp_leaf)
|
||||
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
|
||||
sgsne = snp->srcu_gp_seq_needed_exp;
|
||||
if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
|
||||
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
|
||||
spin_unlock_irqrestore_rcu_node(snp, flags);
|
||||
}
|
||||
snp->srcu_have_cbs[idx] = s;
|
||||
if (snp == sdp->mynode)
|
||||
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
|
||||
if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
|
||||
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
|
||||
spin_unlock_irqrestore_rcu_node(snp, flags);
|
||||
}
|
||||
|
||||
/* Top of tree, must ensure the grace period will be started. */
|
||||
spin_lock_irqsave_rcu_node(ssp, flags);
|
||||
spin_lock_irqsave_ssp_contention(ssp, &flags);
|
||||
if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
|
||||
/*
|
||||
* Record need for grace period s. Pair with load
|
||||
@ -678,9 +899,15 @@ static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
|
||||
rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
|
||||
WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
|
||||
srcu_gp_start(ssp);
|
||||
|
||||
// And how can that list_add() in the "else" clause
|
||||
// possibly be safe for concurrent execution? Well,
|
||||
// it isn't. And it does not have to be. After all, it
|
||||
// can only be executed during early boot when there is only
|
||||
// the one boot CPU running with interrupts still disabled.
|
||||
if (likely(srcu_init_done))
|
||||
queue_delayed_work(rcu_gp_wq, &ssp->work,
|
||||
srcu_get_delay(ssp));
|
||||
!!srcu_get_delay(ssp));
|
||||
else if (list_empty(&ssp->work.work.entry))
|
||||
list_add(&ssp->work.work.entry, &srcu_boot_list);
|
||||
}
|
||||
@ -814,11 +1041,17 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
|
||||
bool needgp = false;
|
||||
unsigned long s;
|
||||
struct srcu_data *sdp;
|
||||
struct srcu_node *sdp_mynode;
|
||||
int ss_state;
|
||||
|
||||
check_init_srcu_struct(ssp);
|
||||
idx = srcu_read_lock(ssp);
|
||||
sdp = raw_cpu_ptr(ssp->sda);
|
||||
spin_lock_irqsave_rcu_node(sdp, flags);
|
||||
ss_state = smp_load_acquire(&ssp->srcu_size_state);
|
||||
if (ss_state < SRCU_SIZE_WAIT_CALL)
|
||||
sdp = per_cpu_ptr(ssp->sda, 0);
|
||||
else
|
||||
sdp = raw_cpu_ptr(ssp->sda);
|
||||
spin_lock_irqsave_sdp_contention(sdp, &flags);
|
||||
if (rhp)
|
||||
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
|
||||
rcu_segcblist_advance(&sdp->srcu_cblist,
|
||||
@ -834,10 +1067,17 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
|
||||
needexp = true;
|
||||
}
|
||||
spin_unlock_irqrestore_rcu_node(sdp, flags);
|
||||
|
||||
/* Ensure that snp node tree is fully initialized before traversing it */
|
||||
if (ss_state < SRCU_SIZE_WAIT_BARRIER)
|
||||
sdp_mynode = NULL;
|
||||
else
|
||||
sdp_mynode = sdp->mynode;
|
||||
|
||||
if (needgp)
|
||||
srcu_funnel_gp_start(ssp, sdp, s, do_norm);
|
||||
else if (needexp)
|
||||
srcu_funnel_exp_start(ssp, sdp->mynode, s);
|
||||
srcu_funnel_exp_start(ssp, sdp_mynode, s);
|
||||
srcu_read_unlock(ssp, idx);
|
||||
return s;
|
||||
}
|
||||
@ -1097,6 +1337,28 @@ static void srcu_barrier_cb(struct rcu_head *rhp)
|
||||
complete(&ssp->srcu_barrier_completion);
|
||||
}
|
||||
|
||||
/*
|
||||
* Enqueue an srcu_barrier() callback on the specified srcu_data
|
||||
* structure's ->cblist. but only if that ->cblist already has at least one
|
||||
* callback enqueued. Note that if a CPU already has callbacks enqueue,
|
||||
* it must have already registered the need for a future grace period,
|
||||
* so all we need do is enqueue a callback that will use the same grace
|
||||
* period as the last callback already in the queue.
|
||||
*/
|
||||
static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
|
||||
{
|
||||
spin_lock_irq_rcu_node(sdp);
|
||||
atomic_inc(&ssp->srcu_barrier_cpu_cnt);
|
||||
sdp->srcu_barrier_head.func = srcu_barrier_cb;
|
||||
debug_rcu_head_queue(&sdp->srcu_barrier_head);
|
||||
if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
|
||||
&sdp->srcu_barrier_head)) {
|
||||
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
|
||||
atomic_dec(&ssp->srcu_barrier_cpu_cnt);
|
||||
}
|
||||
spin_unlock_irq_rcu_node(sdp);
|
||||
}
|
||||
|
||||
/**
|
||||
* srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
|
||||
* @ssp: srcu_struct on which to wait for in-flight callbacks.
|
||||
@ -1104,7 +1366,7 @@ static void srcu_barrier_cb(struct rcu_head *rhp)
|
||||
void srcu_barrier(struct srcu_struct *ssp)
|
||||
{
|
||||
int cpu;
|
||||
struct srcu_data *sdp;
|
||||
int idx;
|
||||
unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
|
||||
|
||||
check_init_srcu_struct(ssp);
|
||||
@ -1120,27 +1382,13 @@ void srcu_barrier(struct srcu_struct *ssp)
|
||||
/* Initial count prevents reaching zero until all CBs are posted. */
|
||||
atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
|
||||
|
||||
/*
|
||||
* Each pass through this loop enqueues a callback, but only
|
||||
* on CPUs already having callbacks enqueued. Note that if
|
||||
* a CPU already has callbacks enqueue, it must have already
|
||||
* registered the need for a future grace period, so all we
|
||||
* need do is enqueue a callback that will use the same
|
||||
* grace period as the last callback already in the queue.
|
||||
*/
|
||||
for_each_possible_cpu(cpu) {
|
||||
sdp = per_cpu_ptr(ssp->sda, cpu);
|
||||
spin_lock_irq_rcu_node(sdp);
|
||||
atomic_inc(&ssp->srcu_barrier_cpu_cnt);
|
||||
sdp->srcu_barrier_head.func = srcu_barrier_cb;
|
||||
debug_rcu_head_queue(&sdp->srcu_barrier_head);
|
||||
if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
|
||||
&sdp->srcu_barrier_head)) {
|
||||
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
|
||||
atomic_dec(&ssp->srcu_barrier_cpu_cnt);
|
||||
}
|
||||
spin_unlock_irq_rcu_node(sdp);
|
||||
}
|
||||
idx = srcu_read_lock(ssp);
|
||||
if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
|
||||
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
|
||||
else
|
||||
for_each_possible_cpu(cpu)
|
||||
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
|
||||
srcu_read_unlock(ssp, idx);
|
||||
|
||||
/* Remove the initial count, at which point reaching zero can happen. */
|
||||
if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
|
||||
@ -1214,6 +1462,7 @@ static void srcu_advance_state(struct srcu_struct *ssp)
|
||||
srcu_flip(ssp);
|
||||
spin_lock_irq_rcu_node(ssp);
|
||||
rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
|
||||
ssp->srcu_n_exp_nodelay = 0;
|
||||
spin_unlock_irq_rcu_node(ssp);
|
||||
}
|
||||
|
||||
@ -1228,6 +1477,7 @@ static void srcu_advance_state(struct srcu_struct *ssp)
|
||||
mutex_unlock(&ssp->srcu_gp_mutex);
|
||||
return; /* readers present, retry later. */
|
||||
}
|
||||
ssp->srcu_n_exp_nodelay = 0;
|
||||
srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
|
||||
}
|
||||
}
|
||||
@ -1318,12 +1568,28 @@ static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
|
||||
*/
|
||||
static void process_srcu(struct work_struct *work)
|
||||
{
|
||||
unsigned long curdelay;
|
||||
unsigned long j;
|
||||
struct srcu_struct *ssp;
|
||||
|
||||
ssp = container_of(work, struct srcu_struct, work.work);
|
||||
|
||||
srcu_advance_state(ssp);
|
||||
srcu_reschedule(ssp, srcu_get_delay(ssp));
|
||||
curdelay = srcu_get_delay(ssp);
|
||||
if (curdelay) {
|
||||
WRITE_ONCE(ssp->reschedule_count, 0);
|
||||
} else {
|
||||
j = jiffies;
|
||||
if (READ_ONCE(ssp->reschedule_jiffies) == j) {
|
||||
WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
|
||||
if (READ_ONCE(ssp->reschedule_count) > SRCU_MAX_NODELAY)
|
||||
curdelay = 1;
|
||||
} else {
|
||||
WRITE_ONCE(ssp->reschedule_count, 1);
|
||||
WRITE_ONCE(ssp->reschedule_jiffies, j);
|
||||
}
|
||||
}
|
||||
srcu_reschedule(ssp, curdelay);
|
||||
}
|
||||
|
||||
void srcutorture_get_gp_data(enum rcutorture_type test_type,
|
||||
@ -1337,43 +1603,69 @@ void srcutorture_get_gp_data(enum rcutorture_type test_type,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
|
||||
|
||||
static const char * const srcu_size_state_name[] = {
|
||||
"SRCU_SIZE_SMALL",
|
||||
"SRCU_SIZE_ALLOC",
|
||||
"SRCU_SIZE_WAIT_BARRIER",
|
||||
"SRCU_SIZE_WAIT_CALL",
|
||||
"SRCU_SIZE_WAIT_CBS1",
|
||||
"SRCU_SIZE_WAIT_CBS2",
|
||||
"SRCU_SIZE_WAIT_CBS3",
|
||||
"SRCU_SIZE_WAIT_CBS4",
|
||||
"SRCU_SIZE_BIG",
|
||||
"SRCU_SIZE_???",
|
||||
};
|
||||
|
||||
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
|
||||
{
|
||||
int cpu;
|
||||
int idx;
|
||||
unsigned long s0 = 0, s1 = 0;
|
||||
int ss_state = READ_ONCE(ssp->srcu_size_state);
|
||||
int ss_state_idx = ss_state;
|
||||
|
||||
idx = ssp->srcu_idx & 0x1;
|
||||
pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
|
||||
tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
|
||||
for_each_possible_cpu(cpu) {
|
||||
unsigned long l0, l1;
|
||||
unsigned long u0, u1;
|
||||
long c0, c1;
|
||||
struct srcu_data *sdp;
|
||||
if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
|
||||
ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
|
||||
pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
|
||||
tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), ss_state,
|
||||
srcu_size_state_name[ss_state_idx]);
|
||||
if (!ssp->sda) {
|
||||
// Called after cleanup_srcu_struct(), perhaps.
|
||||
pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
|
||||
} else {
|
||||
pr_cont(" per-CPU(idx=%d):", idx);
|
||||
for_each_possible_cpu(cpu) {
|
||||
unsigned long l0, l1;
|
||||
unsigned long u0, u1;
|
||||
long c0, c1;
|
||||
struct srcu_data *sdp;
|
||||
|
||||
sdp = per_cpu_ptr(ssp->sda, cpu);
|
||||
u0 = data_race(sdp->srcu_unlock_count[!idx]);
|
||||
u1 = data_race(sdp->srcu_unlock_count[idx]);
|
||||
sdp = per_cpu_ptr(ssp->sda, cpu);
|
||||
u0 = data_race(sdp->srcu_unlock_count[!idx]);
|
||||
u1 = data_race(sdp->srcu_unlock_count[idx]);
|
||||
|
||||
/*
|
||||
* Make sure that a lock is always counted if the corresponding
|
||||
* unlock is counted.
|
||||
*/
|
||||
smp_rmb();
|
||||
/*
|
||||
* Make sure that a lock is always counted if the corresponding
|
||||
* unlock is counted.
|
||||
*/
|
||||
smp_rmb();
|
||||
|
||||
l0 = data_race(sdp->srcu_lock_count[!idx]);
|
||||
l1 = data_race(sdp->srcu_lock_count[idx]);
|
||||
l0 = data_race(sdp->srcu_lock_count[!idx]);
|
||||
l1 = data_race(sdp->srcu_lock_count[idx]);
|
||||
|
||||
c0 = l0 - u0;
|
||||
c1 = l1 - u1;
|
||||
pr_cont(" %d(%ld,%ld %c)",
|
||||
cpu, c0, c1,
|
||||
"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
|
||||
s0 += c0;
|
||||
s1 += c1;
|
||||
c0 = l0 - u0;
|
||||
c1 = l1 - u1;
|
||||
pr_cont(" %d(%ld,%ld %c)",
|
||||
cpu, c0, c1,
|
||||
"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
|
||||
s0 += c0;
|
||||
s1 += c1;
|
||||
}
|
||||
pr_cont(" T(%ld,%ld)\n", s0, s1);
|
||||
}
|
||||
pr_cont(" T(%ld,%ld)\n", s0, s1);
|
||||
if (SRCU_SIZING_IS_TORTURE())
|
||||
srcu_transition_to_big(ssp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
|
||||
|
||||
@ -1390,6 +1682,17 @@ void __init srcu_init(void)
|
||||
{
|
||||
struct srcu_struct *ssp;
|
||||
|
||||
/* Decide on srcu_struct-size strategy. */
|
||||
if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
|
||||
if (nr_cpu_ids >= big_cpu_lim) {
|
||||
convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
|
||||
pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
|
||||
} else {
|
||||
convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
|
||||
pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Once that is set, call_srcu() can follow the normal path and
|
||||
* queue delayed work. This must follow RCU workqueues creation
|
||||
@ -1400,6 +1703,8 @@ void __init srcu_init(void)
|
||||
ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
|
||||
work.work.entry);
|
||||
list_del_init(&ssp->work.work.entry);
|
||||
if (SRCU_SIZING_IS(SRCU_SIZING_INIT) && ssp->srcu_size_state == SRCU_SIZE_SMALL)
|
||||
ssp->srcu_size_state = SRCU_SIZE_ALLOC;
|
||||
queue_work(rcu_gp_wq, &ssp->work.work);
|
||||
}
|
||||
}
|
||||
|
@ -111,7 +111,7 @@ static void rcu_sync_func(struct rcu_head *rhp)
|
||||
* a slowpath during the update. After this function returns, all
|
||||
* subsequent calls to rcu_sync_is_idle() will return false, which
|
||||
* tells readers to stay off their fastpaths. A later call to
|
||||
* rcu_sync_exit() re-enables reader slowpaths.
|
||||
* rcu_sync_exit() re-enables reader fastpaths.
|
||||
*
|
||||
* When called in isolation, rcu_sync_enter() must wait for a grace
|
||||
* period, however, closely spaced calls to rcu_sync_enter() can
|
||||
|
@ -46,7 +46,7 @@ struct rcu_tasks_percpu {
|
||||
|
||||
/**
|
||||
* struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
|
||||
* @cbs_wq: Wait queue allowing new callback to get kthread's attention.
|
||||
* @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
|
||||
* @cbs_gbl_lock: Lock protecting callback list.
|
||||
* @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
|
||||
* @gp_func: This flavor's grace-period-wait function.
|
||||
@ -77,7 +77,7 @@ struct rcu_tasks_percpu {
|
||||
* @kname: This flavor's kthread name.
|
||||
*/
|
||||
struct rcu_tasks {
|
||||
struct wait_queue_head cbs_wq;
|
||||
struct rcuwait cbs_wait;
|
||||
raw_spinlock_t cbs_gbl_lock;
|
||||
int gp_state;
|
||||
int gp_sleep;
|
||||
@ -113,11 +113,11 @@ static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
|
||||
#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
|
||||
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
|
||||
.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
|
||||
.rtp_irq_work = IRQ_WORK_INIT(call_rcu_tasks_iw_wakeup), \
|
||||
.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
|
||||
}; \
|
||||
static struct rcu_tasks rt_name = \
|
||||
{ \
|
||||
.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \
|
||||
.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
|
||||
.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
|
||||
.gp_func = gp, \
|
||||
.call_func = call, \
|
||||
@ -143,6 +143,11 @@ module_param(rcu_task_ipi_delay, int, 0644);
|
||||
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
|
||||
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
|
||||
module_param(rcu_task_stall_timeout, int, 0644);
|
||||
#define RCU_TASK_STALL_INFO (HZ * 10)
|
||||
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
|
||||
module_param(rcu_task_stall_info, int, 0644);
|
||||
static int rcu_task_stall_info_mult __read_mostly = 3;
|
||||
module_param(rcu_task_stall_info_mult, int, 0444);
|
||||
|
||||
static int rcu_task_enqueue_lim __read_mostly = -1;
|
||||
module_param(rcu_task_enqueue_lim, int, 0444);
|
||||
@ -261,14 +266,16 @@ static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
|
||||
struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
|
||||
|
||||
rtp = rtpcp->rtpp;
|
||||
wake_up(&rtp->cbs_wq);
|
||||
rcuwait_wake_up(&rtp->cbs_wait);
|
||||
}
|
||||
|
||||
// Enqueue a callback for the specified flavor of Tasks RCU.
|
||||
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
|
||||
struct rcu_tasks *rtp)
|
||||
{
|
||||
int chosen_cpu;
|
||||
unsigned long flags;
|
||||
int ideal_cpu;
|
||||
unsigned long j;
|
||||
bool needadjust = false;
|
||||
bool needwake;
|
||||
@ -278,8 +285,9 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
|
||||
rhp->func = func;
|
||||
local_irq_save(flags);
|
||||
rcu_read_lock();
|
||||
rtpcp = per_cpu_ptr(rtp->rtpcpu,
|
||||
smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift));
|
||||
ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
|
||||
chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
|
||||
rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
|
||||
if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
|
||||
raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
|
||||
j = jiffies;
|
||||
@ -460,7 +468,7 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
|
||||
}
|
||||
}
|
||||
|
||||
if (rcu_segcblist_empty(&rtpcp->cblist))
|
||||
if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
|
||||
return;
|
||||
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
|
||||
rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
|
||||
@ -509,7 +517,9 @@ static int __noreturn rcu_tasks_kthread(void *arg)
|
||||
set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
|
||||
|
||||
/* If there were none, wait a bit and start over. */
|
||||
wait_event_idle(rtp->cbs_wq, (needgpcb = rcu_tasks_need_gpcb(rtp)));
|
||||
rcuwait_wait_event(&rtp->cbs_wait,
|
||||
(needgpcb = rcu_tasks_need_gpcb(rtp)),
|
||||
TASK_IDLE);
|
||||
|
||||
if (needgpcb & 0x2) {
|
||||
// Wait for one grace period.
|
||||
@ -548,8 +558,15 @@ static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
|
||||
static void __init rcu_tasks_bootup_oddness(void)
|
||||
{
|
||||
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
|
||||
int rtsimc;
|
||||
|
||||
if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
|
||||
pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
|
||||
rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
|
||||
if (rtsimc != rcu_task_stall_info_mult) {
|
||||
pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
|
||||
rcu_task_stall_info_mult = rtsimc;
|
||||
}
|
||||
#endif /* #ifdef CONFIG_TASKS_RCU */
|
||||
#ifdef CONFIG_TASKS_RCU
|
||||
pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
|
||||
@ -568,7 +585,17 @@ static void __init rcu_tasks_bootup_oddness(void)
|
||||
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
|
||||
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
|
||||
{
|
||||
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each...
|
||||
int cpu;
|
||||
bool havecbs = false;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
|
||||
|
||||
if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
|
||||
havecbs = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
|
||||
rtp->kname,
|
||||
tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
|
||||
@ -576,7 +603,7 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
|
||||
data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
|
||||
data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
|
||||
".k"[!!data_race(rtp->kthread_ptr)],
|
||||
".C"[!data_race(rcu_segcblist_empty(&rtpcp->cblist))],
|
||||
".C"[havecbs],
|
||||
s);
|
||||
}
|
||||
#endif // #ifndef CONFIG_TINY_RCU
|
||||
@ -592,10 +619,15 @@ static void exit_tasks_rcu_finish_trace(struct task_struct *t);
|
||||
/* Wait for one RCU-tasks grace period. */
|
||||
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
|
||||
{
|
||||
struct task_struct *g, *t;
|
||||
unsigned long lastreport;
|
||||
LIST_HEAD(holdouts);
|
||||
struct task_struct *g;
|
||||
int fract;
|
||||
LIST_HEAD(holdouts);
|
||||
unsigned long j;
|
||||
unsigned long lastinfo;
|
||||
unsigned long lastreport;
|
||||
bool reported = false;
|
||||
int rtsi;
|
||||
struct task_struct *t;
|
||||
|
||||
set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
|
||||
rtp->pregp_func();
|
||||
@ -621,30 +653,50 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
|
||||
* is empty, we are done.
|
||||
*/
|
||||
lastreport = jiffies;
|
||||
lastinfo = lastreport;
|
||||
rtsi = READ_ONCE(rcu_task_stall_info);
|
||||
|
||||
// Start off with initial wait and slowly back off to 1 HZ wait.
|
||||
fract = rtp->init_fract;
|
||||
|
||||
while (!list_empty(&holdouts)) {
|
||||
ktime_t exp;
|
||||
bool firstreport;
|
||||
bool needreport;
|
||||
int rtst;
|
||||
|
||||
/* Slowly back off waiting for holdouts */
|
||||
// Slowly back off waiting for holdouts
|
||||
set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
|
||||
schedule_timeout_idle(fract);
|
||||
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
|
||||
schedule_timeout_idle(fract);
|
||||
} else {
|
||||
exp = jiffies_to_nsecs(fract);
|
||||
__set_current_state(TASK_IDLE);
|
||||
schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
|
||||
}
|
||||
|
||||
if (fract < HZ)
|
||||
fract++;
|
||||
|
||||
rtst = READ_ONCE(rcu_task_stall_timeout);
|
||||
needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
|
||||
if (needreport)
|
||||
if (needreport) {
|
||||
lastreport = jiffies;
|
||||
reported = true;
|
||||
}
|
||||
firstreport = true;
|
||||
WARN_ON(signal_pending(current));
|
||||
set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
|
||||
rtp->holdouts_func(&holdouts, needreport, &firstreport);
|
||||
|
||||
// Print pre-stall informational messages if needed.
|
||||
j = jiffies;
|
||||
if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
|
||||
lastinfo = j;
|
||||
rtsi = rtsi * rcu_task_stall_info_mult;
|
||||
pr_info("%s: %s grace period %lu is %lu jiffies old.\n",
|
||||
__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
|
||||
}
|
||||
}
|
||||
|
||||
set_tasks_gp_state(rtp, RTGS_POST_GP);
|
||||
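Because rtsi is multiplied by rcu_task_stall_info_mult after each pre-stall informational message, the interval between successive messages grows geometrically. A standalone sketch (illustrative only, not kernel code) of that growth using the defaults set earlier in this series (10-second initial interval, multiplier of 3):

#include <stdio.h>

int main(void)
{
	unsigned long rtsi = 10;	/* seconds, from RCU_TASK_STALL_INFO / HZ (assumed) */
	int mult = 3;			/* rcu_task_stall_info_mult default */
	int i;

	for (i = 1; i <= 4; i++) {
		printf("interval before info message %d: %lu seconds\n", i, rtsi);
		rtsi *= mult;		/* same scaling as the loop above */
	}
	return 0;
}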
@ -950,6 +1002,9 @@ static void rcu_tasks_be_rude(struct work_struct *work)
|
||||
// Wait for one rude RCU-tasks grace period.
|
||||
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
|
||||
{
|
||||
if (num_online_cpus() <= 1)
|
||||
return; // Fastpath for only one CPU.
|
||||
|
||||
rtp->n_ipis += cpumask_weight(cpu_online_mask);
|
||||
schedule_on_each_cpu(rcu_tasks_be_rude);
|
||||
}
|
||||
|
@ -1679,6 +1679,8 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
|
||||
rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
|
||||
if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
|
||||
WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
|
||||
if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
|
||||
WRITE_ONCE(rdp->last_sched_clock, jiffies);
|
||||
WRITE_ONCE(rdp->gpwrap, false);
|
||||
rcu_gpnum_ovf(rnp, rdp);
|
||||
return ret;
|
||||
@ -1705,11 +1707,37 @@ static void note_gp_changes(struct rcu_data *rdp)
|
||||
rcu_gp_kthread_wake();
|
||||
}
|
||||
|
||||
static atomic_t *rcu_gp_slow_suppress;
|
||||
|
||||
/* Register a counter to suppress debugging grace-period delays. */
|
||||
void rcu_gp_slow_register(atomic_t *rgssp)
|
||||
{
|
||||
WARN_ON_ONCE(rcu_gp_slow_suppress);
|
||||
|
||||
WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
|
||||
|
||||
/* Unregister a counter, with NULL for not caring which. */
|
||||
void rcu_gp_slow_unregister(atomic_t *rgssp)
|
||||
{
|
||||
WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
|
||||
|
||||
WRITE_ONCE(rcu_gp_slow_suppress, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
|
||||
|
||||
static bool rcu_gp_slow_is_suppressed(void)
|
||||
{
|
||||
atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
|
||||
|
||||
return rgssp && atomic_read(rgssp);
|
||||
}
|
||||
|
||||
static void rcu_gp_slow(int delay)
|
||||
{
|
||||
if (delay > 0 &&
|
||||
!(rcu_seq_ctr(rcu_state.gp_seq) %
|
||||
(rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
|
||||
if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
|
||||
!(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
|
||||
schedule_timeout_idle(delay);
|
||||
}
|
||||
|
||||
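A plausible usage pattern for the register/unregister pair above is for a caller to hand RCU an atomic counter and bump it around phases where the debugging grace-period delays would get in the way. A userspace stand-in (illustrative only; the actual kernel-side caller is not part of this hunk):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int *gp_slow_suppress;

static void gp_slow_register(atomic_int *p)   { gp_slow_suppress = p; }
static void gp_slow_unregister(void)          { gp_slow_suppress = NULL; }
static bool gp_slow_is_suppressed(void)
{
	return gp_slow_suppress && atomic_load(gp_slow_suppress);
}

int main(void)
{
	static atomic_int nodelay;

	gp_slow_register(&nodelay);
	atomic_fetch_add(&nodelay, 1);		/* entering a delay-sensitive phase */
	printf("suppressed: %d\n", gp_slow_is_suppressed());
	atomic_fetch_sub(&nodelay, 1);		/* leaving it again */
	printf("suppressed: %d\n", gp_slow_is_suppressed());
	gp_slow_unregister();
	return 0;
}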
@ -2096,14 +2124,29 @@ static noinline void rcu_gp_cleanup(void)
|
||||
/* Advance CBs to reduce false positives below. */
|
||||
offloaded = rcu_rdp_is_offloaded(rdp);
|
||||
if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
|
||||
|
||||
// We get here if a grace period was needed (“needgp”)
|
||||
// and the above call to rcu_accelerate_cbs() did not set
|
||||
// the RCU_GP_FLAG_INIT bit in ->gp_state (which records
|
||||
// the need for another grace period). The purpose
|
||||
// of the “offloaded” check is to avoid invoking
|
||||
// rcu_accelerate_cbs() on an offloaded CPU because we do not
|
||||
// hold the ->nocb_lock needed to safely access an offloaded
|
||||
// ->cblist. We do not want to acquire that lock because
|
||||
// it can be heavily contended during callback floods.
|
||||
|
||||
WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
|
||||
WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
|
||||
trace_rcu_grace_period(rcu_state.name,
|
||||
rcu_state.gp_seq,
|
||||
TPS("newreq"));
|
||||
trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
|
||||
} else {
|
||||
WRITE_ONCE(rcu_state.gp_flags,
|
||||
rcu_state.gp_flags & RCU_GP_FLAG_INIT);
|
||||
|
||||
// We get here either if there is no need for an
|
||||
// additional grace period or if rcu_accelerate_cbs() has
|
||||
// already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
|
||||
// So all we need to do is to clear all of the other
|
||||
// ->gp_flags bits.
|
||||
|
||||
WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
|
||||
}
|
||||
raw_spin_unlock_irq_rcu_node(rnp);
|
||||
|
||||
@ -2609,6 +2652,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
|
||||
*/
|
||||
void rcu_sched_clock_irq(int user)
|
||||
{
|
||||
unsigned long j;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PROVE_RCU)) {
|
||||
j = jiffies;
|
||||
WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
|
||||
__this_cpu_write(rcu_data.last_sched_clock, j);
|
||||
}
|
||||
trace_rcu_utilization(TPS("Start scheduler-tick"));
|
||||
lockdep_assert_irqs_disabled();
|
||||
raw_cpu_inc(rcu_data.ticks_this_gp);
|
||||
@ -2624,6 +2674,8 @@ void rcu_sched_clock_irq(int user)
|
||||
rcu_flavor_sched_clock_irq(user);
|
||||
if (rcu_pending(user))
|
||||
invoke_rcu_core();
|
||||
if (user)
|
||||
rcu_tasks_classic_qs(current, false);
|
||||
lockdep_assert_irqs_disabled();
|
||||
|
||||
trace_rcu_utilization(TPS("End scheduler-tick"));
|
||||
@ -3717,7 +3769,9 @@ static int rcu_blocking_is_gp(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PREEMPTION))
|
||||
// Invoking preempt_model_*() too early gets a splat.
|
||||
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE ||
|
||||
preempt_model_full() || preempt_model_rt())
|
||||
return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
|
||||
might_sleep(); /* Check for RCU read-side critical section. */
|
||||
preempt_disable();
|
||||
@ -4179,6 +4233,7 @@ rcu_boot_init_percpu_data(int cpu)
|
||||
rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
|
||||
rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
|
||||
rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
|
||||
rdp->last_sched_clock = jiffies;
|
||||
rdp->cpu = cpu;
|
||||
rcu_boot_init_nocb_percpu_data(rdp);
|
||||
}
|
||||
@ -4480,6 +4535,7 @@ static int __init rcu_spawn_gp_kthread(void)
|
||||
struct rcu_node *rnp;
|
||||
struct sched_param sp;
|
||||
struct task_struct *t;
|
||||
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
|
||||
|
||||
rcu_scheduler_fully_active = 1;
|
||||
t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
|
||||
@ -4497,8 +4553,14 @@ static int __init rcu_spawn_gp_kthread(void)
|
||||
smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
|
||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||
wake_up_process(t);
|
||||
rcu_spawn_nocb_kthreads();
|
||||
rcu_spawn_boost_kthreads();
|
||||
/* This is a pre-SMP initcall, we expect a single CPU */
|
||||
WARN_ON(num_online_cpus() > 1);
|
||||
/*
|
||||
* Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu()
|
||||
* due to rcu_scheduler_fully_active.
|
||||
*/
|
||||
rcu_spawn_cpu_nocb_kthread(smp_processor_id());
|
||||
rcu_spawn_one_boost_kthread(rdp->mynode);
|
||||
rcu_spawn_core_kthreads();
|
||||
return 0;
|
||||
}
|
||||
@ -4782,7 +4844,7 @@ static void __init kfree_rcu_batch_init(void)
|
||||
|
||||
void __init rcu_init(void)
|
||||
{
|
||||
int cpu;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
rcu_early_boot_tests();
|
||||
|
||||
@ -4802,11 +4864,10 @@ void __init rcu_init(void)
|
||||
* or the scheduler are operational.
|
||||
*/
|
||||
pm_notifier(rcu_pm_notify, 0);
|
||||
for_each_online_cpu(cpu) {
|
||||
rcutree_prepare_cpu(cpu);
|
||||
rcu_cpu_starting(cpu);
|
||||
rcutree_online_cpu(cpu);
|
||||
}
|
||||
WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
|
||||
rcutree_prepare_cpu(cpu);
|
||||
rcu_cpu_starting(cpu);
|
||||
rcutree_online_cpu(cpu);
|
||||
|
||||
/* Create workqueue for Tree SRCU and for expedited GPs. */
|
||||
rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
|
||||
|
@ -254,6 +254,7 @@ struct rcu_data {
|
||||
unsigned long rcu_onl_gp_seq; /* ->gp_seq at last online. */
|
||||
short rcu_onl_gp_flags; /* ->gp_flags at last online. */
|
||||
unsigned long last_fqs_resched; /* Time of last rcu_resched(). */
|
||||
unsigned long last_sched_clock; /* Jiffies of last rcu_sched_clock_irq(). */
|
||||
|
||||
int cpu;
|
||||
};
|
||||
@ -364,6 +365,7 @@ struct rcu_state {
|
||||
arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
|
||||
/* Synchronize offline with */
|
||||
/* GP pre-initialization. */
|
||||
int nocb_is_setup; /* nocb is setup from boot */
|
||||
};
|
||||
|
||||
/* Values for rcu_state structure's gp_flags field. */
|
||||
@ -421,7 +423,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
|
||||
static bool rcu_is_callbacks_kthread(void);
|
||||
static void rcu_cpu_kthread_setup(unsigned int cpu);
|
||||
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
|
||||
static void __init rcu_spawn_boost_kthreads(void);
|
||||
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
|
||||
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
|
||||
static void rcu_preempt_deferred_qs(struct task_struct *t);
|
||||
@ -439,7 +440,6 @@ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
|
||||
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
|
||||
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
|
||||
static void rcu_spawn_cpu_nocb_kthread(int cpu);
|
||||
static void __init rcu_spawn_nocb_kthreads(void);
|
||||
static void show_rcu_nocb_state(struct rcu_data *rdp);
|
||||
static void rcu_nocb_lock(struct rcu_data *rdp);
|
||||
static void rcu_nocb_unlock(struct rcu_data *rdp);
|
||||
|
@ -60,9 +60,6 @@ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
|
||||
* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
|
||||
* If the list is invalid, a warning is emitted and all CPUs are offloaded.
|
||||
*/
|
||||
|
||||
static bool rcu_nocb_is_setup;
|
||||
|
||||
static int __init rcu_nocb_setup(char *str)
|
||||
{
|
||||
alloc_bootmem_cpumask_var(&rcu_nocb_mask);
|
||||
@ -72,7 +69,7 @@ static int __init rcu_nocb_setup(char *str)
|
||||
cpumask_setall(rcu_nocb_mask);
|
||||
}
|
||||
}
|
||||
rcu_nocb_is_setup = true;
|
||||
rcu_state.nocb_is_setup = true;
|
||||
return 1;
|
||||
}
|
||||
__setup("rcu_nocbs", rcu_nocb_setup);
|
||||
@ -215,14 +212,6 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
|
||||
init_swait_queue_head(&rnp->nocb_gp_wq[1]);
|
||||
}
|
||||
|
||||
/* Is the specified CPU a no-CBs CPU? */
|
||||
bool rcu_is_nocb_cpu(int cpu)
|
||||
{
|
||||
if (cpumask_available(rcu_nocb_mask))
|
||||
return cpumask_test_cpu(cpu, rcu_nocb_mask);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
|
||||
struct rcu_data *rdp,
|
||||
bool force, unsigned long flags)
|
||||
@ -1180,10 +1169,10 @@ void __init rcu_init_nohz(void)
|
||||
return;
|
||||
}
|
||||
}
|
||||
rcu_nocb_is_setup = true;
|
||||
rcu_state.nocb_is_setup = true;
|
||||
}
|
||||
|
||||
if (!rcu_nocb_is_setup)
|
||||
if (!rcu_state.nocb_is_setup)
|
||||
return;
|
||||
|
||||
#if defined(CONFIG_NO_HZ_FULL)
|
||||
@ -1241,7 +1230,7 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
|
||||
struct task_struct *t;
|
||||
struct sched_param sp;
|
||||
|
||||
if (!rcu_scheduler_fully_active || !rcu_nocb_is_setup)
|
||||
if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup)
|
||||
return;
|
||||
|
||||
/* If there already is an rcuo kthread, then nothing to do. */
|
||||
@ -1277,22 +1266,6 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
|
||||
WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
|
||||
}
|
||||
|
||||
/*
|
||||
* Once the scheduler is running, spawn rcuo kthreads for all online
|
||||
* no-CBs CPUs. This assumes that the early_initcall()s happen before
|
||||
* non-boot CPUs come online -- if this changes, we will need to add
|
||||
* some mutual exclusion.
|
||||
*/
|
||||
static void __init rcu_spawn_nocb_kthreads(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
if (rcu_nocb_is_setup) {
|
||||
for_each_online_cpu(cpu)
|
||||
rcu_spawn_cpu_nocb_kthread(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
/* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
|
||||
static int rcu_nocb_gp_stride = -1;
|
||||
module_param(rcu_nocb_gp_stride, int, 0444);
|
||||
@ -1549,10 +1522,6 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
|
||||
{
|
||||
}
|
||||
|
||||
static void __init rcu_spawn_nocb_kthreads(void)
|
||||
{
|
||||
}
|
||||
|
||||
static void show_rcu_nocb_state(struct rcu_data *rdp)
|
||||
{
|
||||
}
|
||||
|
@ -486,6 +486,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
|
||||
t->rcu_read_unlock_special.s = 0;
|
||||
if (special.b.need_qs) {
|
||||
if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
|
||||
rdp->cpu_no_qs.b.norm = false;
|
||||
rcu_report_qs_rdp(rdp);
|
||||
udelay(rcu_unlock_delay);
|
||||
} else {
|
||||
@ -660,7 +661,13 @@ static void rcu_read_unlock_special(struct task_struct *t)
|
||||
expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
|
||||
// Get scheduler to re-evaluate and call hooks.
|
||||
// If !IRQ_WORK, FQS scan will eventually IPI.
|
||||
init_irq_work(&rdp->defer_qs_iw, rcu_preempt_deferred_qs_handler);
|
||||
if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
|
||||
IS_ENABLED(CONFIG_PREEMPT_RT))
|
||||
rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(
|
||||
rcu_preempt_deferred_qs_handler);
|
||||
else
|
||||
init_irq_work(&rdp->defer_qs_iw,
|
||||
rcu_preempt_deferred_qs_handler);
|
||||
rdp->defer_qs_iw_pending = true;
|
||||
irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
|
||||
}
|
||||
@ -1124,7 +1131,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
|
||||
__releases(rnp->lock)
|
||||
{
|
||||
raw_lockdep_assert_held_rcu_node(rnp);
|
||||
if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
|
||||
if (!rnp->boost_kthread_task ||
|
||||
(!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
|
||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||
return;
|
||||
}
|
||||
@ -1226,18 +1234,6 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
|
||||
free_cpumask_var(cm);
|
||||
}
|
||||
|
||||
/*
|
||||
* Spawn boost kthreads -- called as soon as the scheduler is running.
|
||||
*/
|
||||
static void __init rcu_spawn_boost_kthreads(void)
|
||||
{
|
||||
struct rcu_node *rnp;
|
||||
|
||||
rcu_for_each_leaf_node(rnp)
|
||||
if (rcu_rnp_online_cpus(rnp))
|
||||
rcu_spawn_one_boost_kthread(rnp);
|
||||
}
|
||||
|
||||
#else /* #ifdef CONFIG_RCU_BOOST */
|
||||
|
||||
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
|
||||
@ -1263,10 +1259,6 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
|
||||
{
|
||||
}
|
||||
|
||||
static void __init rcu_spawn_boost_kthreads(void)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* #else #ifdef CONFIG_RCU_BOOST */
|
||||
|
||||
/*
|
||||
|
@ -565,9 +565,9 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
totqlen += rcu_get_n_cbs_cpu(cpu);
|
||||
pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
|
||||
pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu ncpus=%d)\n",
|
||||
smp_processor_id(), (long)(jiffies - gps),
|
||||
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
|
||||
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);
|
||||
if (ndetected) {
|
||||
rcu_dump_cpu_stacks();
|
||||
|
||||
@ -626,9 +626,9 @@ static void print_cpu_stall(unsigned long gps)
|
||||
raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
|
||||
for_each_possible_cpu(cpu)
|
||||
totqlen += rcu_get_n_cbs_cpu(cpu);
|
||||
pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
|
||||
pr_cont("\t(t=%lu jiffies g=%ld q=%lu ncpus=%d)\n",
|
||||
jiffies - gps,
|
||||
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
|
||||
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);
|
||||
|
||||
rcu_check_gp_kthread_expired_fqs_timer();
|
||||
rcu_check_gp_kthread_starvation();
|
||||
|
@@ -267,9 +267,10 @@ static void scf_handler(void *scfc_in)
}
this_cpu_inc(scf_invoked_count);
if (longwait <= 0) {
if (!(r & 0xffc0))
if (!(r & 0xffc0)) {
udelay(r & 0x3f);
goto out;
goto out;
}
}
if (r & 0xfff)
goto out;
@@ -8409,6 +8409,18 @@ static void __init preempt_dynamic_init(void)
}
}

#define PREEMPT_MODEL_ACCESSOR(mode) \
bool preempt_model_##mode(void) \
{ \
WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
return preempt_dynamic_mode == preempt_dynamic_##mode; \
} \
EXPORT_SYMBOL_GPL(preempt_model_##mode)

PREEMPT_MODEL_ACCESSOR(none);
PREEMPT_MODEL_ACCESSOR(voluntary);
PREEMPT_MODEL_ACCESSOR(full);

#else /* !CONFIG_PREEMPT_DYNAMIC */

static inline void preempt_dynamic_init(void) { }
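For readability, the accessor that PREEMPT_MODEL_ACCESSOR(full) above generates expands to roughly the following (expansion shown for illustration only, not an additional part of the patch):

bool preempt_model_full(void)
{
	WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
	return preempt_dynamic_mode == preempt_dynamic_full;
}
EXPORT_SYMBOL_GPL(preempt_model_full);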
@@ -183,7 +183,9 @@ static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);
static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);

#define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)
static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
module_param(csd_lock_timeout, ulong, 0444);

static atomic_t csd_bug_count = ATOMIC_INIT(0);
static u64 cfd_seq;

@@ -329,6 +331,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
u64 ts2, ts_delta;
call_single_data_t *cpu_cur_csd;
unsigned int flags = READ_ONCE(csd->node.u_flags);
unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;

if (!(flags & CSD_FLAG_LOCK)) {
if (!unlikely(*bug_id))
@@ -341,7 +344,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *

ts2 = sched_clock();
ts_delta = ts2 - *ts1;
if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
return false;

firsttime = !*bug_id;
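The reworked check above takes the module parameter in milliseconds, compares in nanoseconds, and treats zero as "disabled". A tiny standalone sketch of that decision (illustrative only; the helper name is made up):

#include <stdbool.h>
#include <stdio.h>

static bool csd_wait_too_long(unsigned long long delta_ns, unsigned long timeout_ms)
{
	unsigned long long timeout_ns = timeout_ms * 1000000ULL;

	/* A zero timeout disables the diagnostic entirely. */
	return timeout_ns != 0 && delta_ns > timeout_ns;
}

int main(void)
{
	printf("%d\n", csd_wait_too_long(6ULL * 1000000000ULL, 5000));	/* 6 s vs 5 s: 1 */
	printf("%d\n", csd_wait_too_long(6ULL * 1000000000ULL, 0));	/* disabled: 0 */
	return 0;
}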
@ -144,6 +144,7 @@ config TRACING
|
||||
select BINARY_PRINTF
|
||||
select EVENT_TRACING
|
||||
select TRACE_CLOCK
|
||||
select TASKS_RCU if PREEMPTION
|
||||
|
||||
config GENERIC_TRACER
|
||||
bool
|
||||
|
@ -301,7 +301,7 @@ specify_qemu_cpus () {
|
||||
echo $2 -smp $3
|
||||
;;
|
||||
qemu-system-ppc64)
|
||||
nt="`lscpu | grep '^NUMA node0' | sed -e 's/^[^,]*,\([0-9]*\),.*$/\1/'`"
|
||||
nt="`lscpu | sed -n 's/^Thread(s) per core:\s*//p'`"
|
||||
echo $2 -smp cores=`expr \( $3 + $nt - 1 \) / $nt`,threads=$nt
|
||||
;;
|
||||
esac
|
||||
|
@ -36,7 +36,7 @@ do
|
||||
then
|
||||
egrep "error:|warning:|^ld: .*undefined reference to" < $i > $i.diags
|
||||
files="$files $i.diags $i"
|
||||
elif ! test -f ${scenariobasedir}/vmlinux
|
||||
elif ! test -f ${scenariobasedir}/vmlinux && ! test -f "${rundir}/re-run"
|
||||
then
|
||||
echo No ${scenariobasedir}/vmlinux file > $i.diags
|
||||
files="$files $i.diags $i"
|
||||
|
@ -33,7 +33,12 @@ do
|
||||
TORTURE_SUITE="`cat $i/../torture_suite`"
|
||||
configfile=`echo $i | sed -e 's,^.*/,,'`
|
||||
rm -f $i/console.log.*.diags
|
||||
kvm-recheck-${TORTURE_SUITE}.sh $i
|
||||
case "${TORTURE_SUITE}" in
|
||||
X*)
|
||||
;;
|
||||
*)
|
||||
kvm-recheck-${TORTURE_SUITE}.sh $i
|
||||
esac
|
||||
if test -f "$i/qemu-retval" && test "`cat $i/qemu-retval`" -ne 0 && test "`cat $i/qemu-retval`" -ne 137
|
||||
then
|
||||
echo QEMU error, output:
|
||||
|
@ -138,14 +138,14 @@ chmod +x $T/bin/kvm-remote-*.sh
|
||||
# Check first to avoid the need for cleanup for system-name typos
|
||||
for i in $systems
|
||||
do
|
||||
ncpus="`ssh $i getconf _NPROCESSORS_ONLN 2> /dev/null`"
|
||||
echo $i: $ncpus CPUs " " `date` | tee -a "$oldrun/remote-log"
|
||||
ncpus="`ssh -o BatchMode=yes $i getconf _NPROCESSORS_ONLN 2> /dev/null`"
|
||||
ret=$?
|
||||
if test "$ret" -ne 0
|
||||
then
|
||||
echo System $i unreachable, giving up. | tee -a "$oldrun/remote-log"
|
||||
exit 4
|
||||
fi
|
||||
echo $i: $ncpus CPUs " " `date` | tee -a "$oldrun/remote-log"
|
||||
done
|
||||
|
||||
# Download and expand the tarball on all systems.
|
||||
@ -153,14 +153,14 @@ echo Build-products tarball: `du -h $T/binres.tgz` | tee -a "$oldrun/remote-log"
|
||||
for i in $systems
|
||||
do
|
||||
echo Downloading tarball to $i `date` | tee -a "$oldrun/remote-log"
|
||||
cat $T/binres.tgz | ssh $i "cd /tmp; tar -xzf -"
|
||||
cat $T/binres.tgz | ssh -o BatchMode=yes $i "cd /tmp; tar -xzf -"
|
||||
ret=$?
|
||||
tries=0
|
||||
while test "$ret" -ne 0
|
||||
do
|
||||
echo Unable to download $T/binres.tgz to system $i, waiting and then retrying. $tries prior retries. | tee -a "$oldrun/remote-log"
|
||||
sleep 60
|
||||
cat $T/binres.tgz | ssh $i "cd /tmp; tar -xzf -"
|
||||
cat $T/binres.tgz | ssh -o BatchMode=yes $i "cd /tmp; tar -xzf -"
|
||||
ret=$?
|
||||
if test "$ret" -ne 0
|
||||
then
|
||||
@ -185,7 +185,7 @@ checkremotefile () {
|
||||
|
||||
while :
|
||||
do
|
||||
ssh $1 "test -f \"$2\""
|
||||
ssh -o BatchMode=yes $1 "test -f \"$2\""
|
||||
ret=$?
|
||||
if test "$ret" -eq 255
|
||||
then
|
||||
@ -228,7 +228,7 @@ startbatches () {
|
||||
then
|
||||
continue # System still running last test, skip.
|
||||
fi
|
||||
ssh "$i" "cd \"$resdir/$ds\"; touch remote.run; PATH=\"$T/bin:$PATH\" nohup kvm-remote-$curbatch.sh > kvm-remote-$curbatch.sh.out 2>&1 &" 1>&2
|
||||
ssh -o BatchMode=yes "$i" "cd \"$resdir/$ds\"; touch remote.run; PATH=\"$T/bin:$PATH\" nohup kvm-remote-$curbatch.sh > kvm-remote-$curbatch.sh.out 2>&1 &" 1>&2
|
||||
ret=$?
|
||||
if test "$ret" -ne 0
|
||||
then
|
||||
@ -267,7 +267,7 @@ do
|
||||
sleep 30
|
||||
done
|
||||
echo " ---" Collecting results from $i `date` | tee -a "$oldrun/remote-log"
|
||||
( cd "$oldrun"; ssh $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
|
||||
( cd "$oldrun"; ssh -o BatchMode=yes $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
|
||||
done
|
||||
|
||||
( kvm-end-run-stats.sh "$oldrun" "$starttime"; echo $? > $T/exitcode ) | tee -a "$oldrun/remote-log"
|
||||
|
@ -44,6 +44,7 @@ TORTURE_KCONFIG_KASAN_ARG=""
|
||||
TORTURE_KCONFIG_KCSAN_ARG=""
|
||||
TORTURE_KMAKE_ARG=""
|
||||
TORTURE_QEMU_MEM=512
|
||||
torture_qemu_mem_default=1
|
||||
TORTURE_REMOTE=
|
||||
TORTURE_SHUTDOWN_GRACE=180
|
||||
TORTURE_SUITE=rcu
|
||||
@ -86,7 +87,7 @@ usage () {
|
||||
echo " --remote"
|
||||
echo " --results absolute-pathname"
|
||||
echo " --shutdown-grace seconds"
|
||||
echo " --torture lock|rcu|rcuscale|refscale|scf"
|
||||
echo " --torture lock|rcu|rcuscale|refscale|scf|X*"
|
||||
echo " --trust-make"
|
||||
exit 1
|
||||
}
|
||||
@ -180,6 +181,10 @@ do
|
||||
;;
|
||||
--kasan)
|
||||
TORTURE_KCONFIG_KASAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KASAN=y"; export TORTURE_KCONFIG_KASAN_ARG
|
||||
if test -n "$torture_qemu_mem_default"
|
||||
then
|
||||
TORTURE_QEMU_MEM=2G
|
||||
fi
|
||||
;;
|
||||
--kconfig|--kconfigs)
|
||||
checkarg --kconfig "(Kconfig options)" $# "$2" '^CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\)\( CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\)\)*$' '^error$'
|
||||
@ -202,6 +207,7 @@ do
|
||||
--memory)
|
||||
checkarg --memory "(memory size)" $# "$2" '^[0-9]\+[MG]\?$' error
|
||||
TORTURE_QEMU_MEM=$2
|
||||
torture_qemu_mem_default=
|
||||
shift
|
||||
;;
|
||||
--no-initrd)
|
||||
@ -231,7 +237,7 @@ do
|
||||
shift
|
||||
;;
|
||||
--torture)
|
||||
checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\|rcuscale\|refscale\|scf\)$' '^--'
|
||||
checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\|rcuscale\|refscale\|scf\|X.*\)$' '^--'
|
||||
TORTURE_SUITE=$2
|
||||
TORTURE_MOD="`echo $TORTURE_SUITE | sed -e 's/^\(lock\|rcu\|scf\)$/\1torture/'`"
|
||||
shift
|
||||
|
@ -54,6 +54,7 @@ do_kvfree=yes
|
||||
do_kasan=yes
|
||||
do_kcsan=no
|
||||
do_clocksourcewd=yes
|
||||
do_rt=yes
|
||||
|
||||
# doyesno - Helper function for yes/no arguments
|
||||
function doyesno () {
|
||||
@ -82,6 +83,7 @@ usage () {
|
||||
echo " --do-rcuscale / --do-no-rcuscale"
|
||||
echo " --do-rcutorture / --do-no-rcutorture"
|
||||
echo " --do-refscale / --do-no-refscale"
|
||||
echo " --do-rt / --do-no-rt"
|
||||
echo " --do-scftorture / --do-no-scftorture"
|
||||
echo " --duration [ <minutes> | <hours>h | <days>d ]"
|
||||
echo " --kcsan-kmake-arg kernel-make-arguments"
|
||||
@ -118,6 +120,7 @@ do
|
||||
do_scftorture=yes
|
||||
do_rcuscale=yes
|
||||
do_refscale=yes
|
||||
do_rt=yes
|
||||
do_kvfree=yes
|
||||
do_kasan=yes
|
||||
do_kcsan=yes
|
||||
@ -148,6 +151,7 @@ do
|
||||
do_scftorture=no
|
||||
do_rcuscale=no
|
||||
do_refscale=no
|
||||
do_rt=no
|
||||
do_kvfree=no
|
||||
do_kasan=no
|
||||
do_kcsan=no
|
||||
@ -162,6 +166,9 @@ do
|
||||
--do-refscale|--do-no-refscale)
|
||||
do_refscale=`doyesno "$1" --do-refscale`
|
||||
;;
|
||||
--do-rt|--do-no-rt)
|
||||
do_rt=`doyesno "$1" --do-rt`
|
||||
;;
|
||||
--do-scftorture|--do-no-scftorture)
|
||||
do_scftorture=`doyesno "$1" --do-scftorture`
|
||||
;;
|
||||
@ -322,6 +329,7 @@ then
|
||||
echo " --- make clean" > "$amcdir/Make.out" 2>&1
|
||||
make -j$MAKE_ALLOTED_CPUS clean >> "$amcdir/Make.out" 2>&1
|
||||
echo " --- make allmodconfig" >> "$amcdir/Make.out" 2>&1
|
||||
cp .config $amcdir
|
||||
make -j$MAKE_ALLOTED_CPUS allmodconfig >> "$amcdir/Make.out" 2>&1
|
||||
echo " --- make " >> "$amcdir/Make.out" 2>&1
|
||||
make -j$MAKE_ALLOTED_CPUS >> "$amcdir/Make.out" 2>&1
|
||||
@ -350,8 +358,19 @@ fi
|
||||
|
||||
if test "$do_scftorture" = "yes"
|
||||
then
|
||||
torture_bootargs="scftorture.nthreads=$HALF_ALLOTED_CPUS torture.disable_onoff_at_boot"
|
||||
torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make
|
||||
torture_bootargs="scftorture.nthreads=$HALF_ALLOTED_CPUS torture.disable_onoff_at_boot csdlock_debug=1"
|
||||
torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 2G --trust-make
|
||||
fi
|
||||
|
||||
if test "$do_rt" = "yes"
|
||||
then
|
||||
# With all post-boot grace periods forced to normal.
|
||||
torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcupdate.rcu_normal=1"
|
||||
torture_set "rcurttorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make
|
||||
|
||||
# With all post-boot grace periods forced to expedited.
|
||||
torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcupdate.rcu_expedited=1"
|
||||
torture_set "rcurttorture-exp" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make
|
||||
fi
|
||||
|
||||
if test "$do_refscale" = yes
|
||||
@ -363,7 +382,7 @@ fi
|
||||
for prim in $primlist
|
||||
do
|
||||
torture_bootargs="refscale.scale_type="$prim" refscale.nreaders=$HALF_ALLOTED_CPUS refscale.loops=10000 refscale.holdoff=20 torture.disable_onoff_at_boot"
|
||||
torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --bootargs "verbose_batched=$VERBOSE_BATCH_CPUS torture.verbose_sleep_frequency=8 torture.verbose_sleep_duration=$VERBOSE_BATCH_CPUS" --trust-make
|
||||
torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_TASKS_TRACE_RCU=y CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --bootargs "verbose_batched=$VERBOSE_BATCH_CPUS torture.verbose_sleep_frequency=8 torture.verbose_sleep_duration=$VERBOSE_BATCH_CPUS" --trust-make
|
||||
done
|
||||
|
||||
if test "$do_rcuscale" = yes
|
||||
@ -375,13 +394,13 @@ fi
|
||||
for prim in $primlist
|
||||
do
|
||||
torture_bootargs="rcuscale.scale_type="$prim" rcuscale.nwriters=$HALF_ALLOTED_CPUS rcuscale.holdoff=20 torture.disable_onoff_at_boot"
|
||||
torture_set "rcuscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make
|
||||
torture_set "rcuscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_TASKS_TRACE_RCU=y CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make
|
||||
done
|
||||
|
||||
if test "$do_kvfree" = "yes"
|
||||
then
|
||||
torture_bootargs="rcuscale.kfree_rcu_test=1 rcuscale.kfree_nthreads=16 rcuscale.holdoff=20 rcuscale.kfree_loops=10000 torture.disable_onoff_at_boot"
|
||||
torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make
|
||||
torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 2G --trust-make
|
||||
fi
|
||||
|
||||
if test "$do_clocksourcewd" = "yes"
|
||||
|
@ -8,3 +8,5 @@ CONFIG_DEBUG_LOCK_ALLOC=y
|
||||
CONFIG_PROVE_LOCKING=y
|
||||
#CHECK#CONFIG_PROVE_RCU=y
|
||||
CONFIG_RCU_EXPERT=y
|
||||
CONFIG_FORCE_TASKS_RUDE_RCU=y
|
||||
#CHECK#CONFIG_TASKS_RUDE_RCU=y
|
||||
|
@ -6,3 +6,5 @@ CONFIG_PREEMPT_NONE=y
|
||||
CONFIG_PREEMPT_VOLUNTARY=n
|
||||
CONFIG_PREEMPT=n
|
||||
#CHECK#CONFIG_RCU_EXPERT=n
|
||||
CONFIG_KPROBES=n
|
||||
CONFIG_FTRACE=n
|
||||
|
@ -7,4 +7,5 @@ CONFIG_PREEMPT=y
|
||||
CONFIG_DEBUG_LOCK_ALLOC=y
|
||||
CONFIG_PROVE_LOCKING=y
|
||||
#CHECK#CONFIG_PROVE_RCU=y
|
||||
CONFIG_TASKS_RCU=y
|
||||
CONFIG_RCU_EXPERT=y
|
||||
|
@ -2,3 +2,7 @@ CONFIG_SMP=n
|
||||
CONFIG_PREEMPT_NONE=y
|
||||
CONFIG_PREEMPT_VOLUNTARY=n
|
||||
CONFIG_PREEMPT=n
|
||||
CONFIG_PREEMPT_DYNAMIC=n
|
||||
#CHECK#CONFIG_TASKS_RCU=y
|
||||
CONFIG_FORCE_TASKS_RCU=y
|
||||
CONFIG_RCU_EXPERT=y
|
||||
|
@ -1 +1,2 @@
|
||||
rcutorture.torture_type=tasks
|
||||
rcutorture.stat_interval=60
|
||||
|
@ -7,3 +7,5 @@ CONFIG_HZ_PERIODIC=n
|
||||
CONFIG_NO_HZ_IDLE=n
|
||||
CONFIG_NO_HZ_FULL=y
|
||||
#CHECK#CONFIG_RCU_EXPERT=n
|
||||
CONFIG_TASKS_RCU=y
|
||||
CONFIG_RCU_EXPERT=y
|
||||
|
@ -4,8 +4,11 @@ CONFIG_HOTPLUG_CPU=y
|
||||
CONFIG_PREEMPT_NONE=y
|
||||
CONFIG_PREEMPT_VOLUNTARY=n
|
||||
CONFIG_PREEMPT=n
|
||||
CONFIG_PREEMPT_DYNAMIC=n
|
||||
CONFIG_DEBUG_LOCK_ALLOC=n
|
||||
CONFIG_PROVE_LOCKING=n
|
||||
#CHECK#CONFIG_PROVE_RCU=n
|
||||
CONFIG_FORCE_TASKS_TRACE_RCU=y
|
||||
#CHECK#CONFIG_TASKS_TRACE_RCU=y
|
||||
CONFIG_TASKS_TRACE_RCU_READ_MB=y
|
||||
CONFIG_RCU_EXPERT=y
|
||||
|
@ -7,5 +7,7 @@ CONFIG_PREEMPT=y
|
||||
CONFIG_DEBUG_LOCK_ALLOC=y
|
||||
CONFIG_PROVE_LOCKING=y
|
||||
#CHECK#CONFIG_PROVE_RCU=y
|
||||
CONFIG_FORCE_TASKS_TRACE_RCU=y
|
||||
#CHECK#CONFIG_TASKS_TRACE_RCU=y
|
||||
CONFIG_TASKS_TRACE_RCU_READ_MB=n
|
||||
CONFIG_RCU_EXPERT=y
|
||||
|
@ -1,8 +1,9 @@
|
||||
CONFIG_SMP=y
|
||||
CONFIG_NR_CPUS=8
|
||||
CONFIG_PREEMPT_NONE=y
|
||||
CONFIG_PREEMPT_VOLUNTARY=n
|
||||
CONFIG_PREEMPT_NONE=n
|
||||
CONFIG_PREEMPT_VOLUNTARY=y
|
||||
CONFIG_PREEMPT=n
|
||||
CONFIG_PREEMPT_DYNAMIC=n
|
||||
#CHECK#CONFIG_TREE_RCU=y
|
||||
CONFIG_HZ_PERIODIC=n
|
||||
CONFIG_NO_HZ_IDLE=n
|
||||
|
@ -3,6 +3,7 @@ CONFIG_NR_CPUS=16
|
||||
CONFIG_PREEMPT_NONE=y
|
||||
CONFIG_PREEMPT_VOLUNTARY=n
|
||||
CONFIG_PREEMPT=n
|
||||
CONFIG_PREEMPT_DYNAMIC=n
|
||||
#CHECK#CONFIG_TREE_RCU=y
|
||||
CONFIG_HZ_PERIODIC=n
|
||||
CONFIG_NO_HZ_IDLE=n
|
||||
|
@ -13,3 +13,5 @@ CONFIG_DEBUG_LOCK_ALLOC=n
|
||||
CONFIG_RCU_BOOST=n
|
||||
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
|
||||
#CHECK#CONFIG_RCU_EXPERT=n
|
||||
CONFIG_KPROBES=n
|
||||
CONFIG_FTRACE=n
|
||||
|
@ -3,6 +3,7 @@ CONFIG_NR_CPUS=56
|
||||
CONFIG_PREEMPT_NONE=y
|
||||
CONFIG_PREEMPT_VOLUNTARY=n
|
||||
CONFIG_PREEMPT=n
|
||||
CONFIG_PREEMPT_DYNAMIC=n
|
||||
#CHECK#CONFIG_TREE_RCU=y
|
||||
CONFIG_HZ_PERIODIC=n
|
||||
CONFIG_NO_HZ_IDLE=y
|
||||
|
@ -9,7 +9,7 @@
|
||||
|
||||
# rcutorture_param_n_barrier_cbs bootparam-string
|
||||
#
|
||||
# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
|
||||
# Adds n_barrier_cbs rcutorture module parameter if not already specified.
|
||||
rcutorture_param_n_barrier_cbs () {
|
||||
if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
|
||||
then
|
||||
@ -30,13 +30,25 @@ rcutorture_param_onoff () {
|
||||
fi
|
||||
}
|
||||
|
||||
# rcutorture_param_stat_interval bootparam-string
|
||||
#
|
||||
# Adds stat_interval rcutorture module parameter if not already specified.
|
||||
rcutorture_param_stat_interval () {
|
||||
if echo $1 | grep -q "rcutorture\.stat_interval"
|
||||
then
|
||||
:
|
||||
else
|
||||
echo rcutorture.stat_interval=15
|
||||
fi
|
||||
}
|
||||
|
||||
# per_version_boot_params bootparam-string config-file seconds
|
||||
#
|
||||
# Adds per-version torture-module parameters to kernels supporting them.
|
||||
per_version_boot_params () {
|
||||
echo $1 `rcutorture_param_onoff "$1" "$2"` \
|
||||
`rcutorture_param_n_barrier_cbs "$1"` \
|
||||
rcutorture.stat_interval=15 \
|
||||
`rcutorture_param_stat_interval "$1"` \
|
||||
rcutorture.shutdown_secs=$3 \
|
||||
rcutorture.test_no_idle_hz=1 \
|
||||
rcutorture.verbose=1
|
||||
|
@ -1,5 +1,6 @@
|
||||
CONFIG_RCU_SCALE_TEST=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_TASKS_RCU_GENERIC=y
|
||||
CONFIG_TASKS_RCU=y
|
||||
CONFIG_TASKS_TRACE_RCU=y
|
||||
CONFIG_FORCE_TASKS_RCU=y
|
||||
#CHECK#CONFIG_TASKS_RCU=y
|
||||
CONFIG_FORCE_TASKS_TRACE_RCU=y
|
||||
#CHECK#CONFIG_TASKS_TRACE_RCU=y
|
||||
|
@ -16,3 +16,5 @@ CONFIG_RCU_BOOST=n
|
||||
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
|
||||
CONFIG_RCU_EXPERT=y
|
||||
CONFIG_RCU_TRACE=y
|
||||
CONFIG_KPROBES=n
|
||||
CONFIG_FTRACE=n
|
||||
|
@ -1,2 +1,6 @@
|
||||
CONFIG_RCU_REF_SCALE_TEST=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_FORCE_TASKS_RCU=y
|
||||
#CHECK#CONFIG_TASKS_RCU=y
|
||||
CONFIG_FORCE_TASKS_TRACE_RCU=y
|
||||
#CHECK#CONFIG_TASKS_TRACE_RCU=y
|
||||
|
@ -15,3 +15,5 @@ CONFIG_PROVE_LOCKING=n
|
||||
CONFIG_RCU_BOOST=n
|
||||
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
|
||||
CONFIG_RCU_EXPERT=y
|
||||
CONFIG_KPROBES=n
|
||||
CONFIG_FTRACE=n
|
||||
|
@ -7,3 +7,5 @@ CONFIG_NO_HZ_IDLE=n
|
||||
CONFIG_NO_HZ_FULL=y
|
||||
CONFIG_DEBUG_LOCK_ALLOC=n
|
||||
CONFIG_PROVE_LOCKING=n
|
||||
CONFIG_KPROBES=n
|
||||
CONFIG_FTRACE=n
|
||||
|
@ -7,3 +7,4 @@ CONFIG_NO_HZ_IDLE=y
|
||||
CONFIG_NO_HZ_FULL=n
|
||||
CONFIG_DEBUG_LOCK_ALLOC=y
|
||||
CONFIG_PROVE_LOCKING=y
|
||||
CONFIG_RCU_EXPERT=y
|
||||
|
@ -25,6 +25,5 @@ per_version_boot_params () {
|
||||
echo $1 `scftorture_param_onoff "$1" "$2"` \
|
||||
scftorture.stat_interval=15 \
|
||||
scftorture.shutdown_secs=$3 \
|
||||
scftorture.verbose=1 \
|
||||
scf
|
||||
scftorture.verbose=1
|
||||
}
|