rcu: Move synchronize_sched_expedited() state to rcu_state

Tracing (debugfs) of expedited RCU primitives is required, which in turn
requires that the relevant data be located where the tracing code can find
it, rather than in file-scope static variables in kernel/rcutree.c.
This commit therefore moves sync_sched_expedited_started and
sync_sched_expedited_done into the rcu_state structure, as the
->expedited_start and ->expedited_done fields, respectively.
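
For illustration only, a hedged sketch of the kind of consumer this move
enables.  print_expedited_state() is a hypothetical helper, not code added
by this commit, and the fragment assumes the usual seq_file machinery; the
point is that such a reader can only be written once the counters are
reachable through a struct rcu_state pointer instead of being private
statics in kernel/rcutree.c:

/*
 * Hypothetical debugfs/tracing consumer -- illustration only, not part
 * of this commit.  Requires <linux/seq_file.h>; relies on the counters
 * now being rcu_state fields reachable through rsp.
 */
static void print_expedited_state(struct seq_file *m, struct rcu_state *rsp)
{
	seq_printf(m, "expedited: start=%ld done=%ld\n",
		   atomic_long_read(&rsp->expedited_start),
		   atomic_long_read(&rsp->expedited_done));
}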

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
commit 40694d6644
parent 1924bcb025
Author:    Paul E. McKenney
Date:      2012-10-11 15:24:03 -07:00
Committer: Paul E. McKenney

2 changed files with 12 additions and 11 deletions

kernel/rcutree.c

@@ -2249,9 +2249,6 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
-static atomic_long_t sync_sched_expedited_started = ATOMIC_LONG_INIT(0);
-static atomic_long_t sync_sched_expedited_done = ATOMIC_LONG_INIT(0);
-
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
 	/*
@@ -2310,6 +2307,7 @@ void synchronize_sched_expedited(void)
 {
 	long firstsnap, s, snap;
 	int trycount = 0;
+	struct rcu_state *rsp = &rcu_sched_state;
 
 	/*
 	 * If we are in danger of counter wrap, just do synchronize_sched().
@@ -2319,8 +2317,8 @@ void synchronize_sched_expedited(void)
 	 * counter wrap on a 32-bit system. Quite a few more CPUs would of
 	 * course be required on a 64-bit system.
 	 */
-	if (ULONG_CMP_GE((ulong)atomic_read(&sync_sched_expedited_started),
-			 (ulong)atomic_read(&sync_sched_expedited_done) +
+	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
+			 (ulong)atomic_long_read(&rsp->expedited_done) +
 			 ULONG_MAX / 8)) {
 		synchronize_sched();
 		return;
@@ -2330,7 +2328,7 @@ void synchronize_sched_expedited(void)
 	 * Take a ticket. Note that atomic_inc_return() implies a
 	 * full memory barrier.
 	 */
-	snap = atomic_long_inc_return(&sync_sched_expedited_started);
+	snap = atomic_long_inc_return(&rsp->expedited_start);
 	firstsnap = snap;
 	get_online_cpus();
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
@@ -2345,7 +2343,7 @@ void synchronize_sched_expedited(void)
 		put_online_cpus();
 
 		/* Check to see if someone else did our work for us. */
-		s = atomic_long_read(&sync_sched_expedited_done);
+		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			smp_mb(); /* ensure test happens before caller kfree */
 			return;
@@ -2360,7 +2358,7 @@ void synchronize_sched_expedited(void)
 		}
 
 		/* Recheck to see if someone else did our work for us. */
-		s = atomic_long_read(&sync_sched_expedited_done);
+		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			smp_mb(); /* ensure test happens before caller kfree */
 			return;
@@ -2374,7 +2372,7 @@ void synchronize_sched_expedited(void)
 		 * period works for us.
 		 */
 		get_online_cpus();
-		snap = atomic_long_read(&sync_sched_expedited_started);
+		snap = atomic_long_read(&rsp->expedited_start);
 		smp_mb(); /* ensure read is before try_stop_cpus(). */
 	}
 
@@ -2385,12 +2383,12 @@ void synchronize_sched_expedited(void)
 	 * than we did already did their update.
 	 */
 	do {
-		s = atomic_long_read(&sync_sched_expedited_done);
+		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
 			smp_mb(); /* ensure test happens before caller kfree */
 			break;
 		}
-	} while (atomic_long_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
+	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
 	put_online_cpus();
 }
 
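
The renamed counters implement a wrap-tolerant ticket scheme: each caller
takes a "start" ticket, and whoever finishes a grace period advances the
"done" counter so that concurrent callers whose ticket is already covered
can return early.  Below is a minimal standalone sketch of that scheme,
using C11 atomics in place of the kernel's atomic_long_t; names such as
expedited_state, work_already_done(), and mark_done() are illustrative
only, and ulong_cmp_ge() mirrors the kernel's ULONG_CMP_GE() macro:

/* Standalone illustration of the ticket/done scheme -- not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>

/* Wrap-safe "greater than or equal", modeled on the kernel's ULONG_CMP_GE(). */
static bool ulong_cmp_ge(unsigned long a, unsigned long b)
{
	return ULONG_MAX / 2 >= a - b;
}

struct expedited_state {
	atomic_ulong start;	/* Starting ticket. */
	atomic_ulong done;	/* Done ticket. */
};

/* True if some other caller has already covered our snapshot. */
static bool work_already_done(struct expedited_state *sp, unsigned long snap)
{
	return ulong_cmp_ge(atomic_load(&sp->done), snap);
}

/* Advance ->done to our snapshot unless someone else moved it further. */
static void mark_done(struct expedited_state *sp, unsigned long snap)
{
	unsigned long s = atomic_load(&sp->done);

	while (!ulong_cmp_ge(s, snap) &&
	       !atomic_compare_exchange_weak(&sp->done, &s, snap))
		;	/* Failed CAS reloaded s; retry. */
}

A caller would take its ticket with an atomic increment of ->start, test
work_already_done() before and after spinning, and call mark_done() once
its own grace period has completed, matching the flow of the function above.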

kernel/rcutree.h

@@ -404,6 +404,9 @@ struct rcu_state {
 						/*  _rcu_barrier(). */
 	/* End of fields guarded by barrier_mutex. */
 
+	atomic_long_t expedited_start;		/* Starting ticket. */
+	atomic_long_t expedited_done;		/* Done ticket. */
+
 	unsigned long jiffies_force_qs;		/* Time at which to invoke */
 						/*  force_quiescent_state(). */
 	unsigned long n_force_qs;		/* Number of calls to */