rcu: Use *_ONCE() to protect lockless ->expmask accesses
The rcu_node structure's ->expmask field is accessed locklessly when starting a new expedited grace period and when reporting an expedited RCU CPU stall warning. This commit therefore handles the former by taking a snapshot of ->expmask while the lock is held and the latter by applying READ_ONCE() to lockless reads and WRITE_ONCE() to the corresponding updates.

Link: https://lore.kernel.org/lkml/CANpmjNNmSOagbTpffHr4=Yedckx9Rm2NuGqC9UqE+AOz5f1-ZQ@mail.gmail.com
Reported-by: syzbot+134336b86f728d6e55a0@syzkaller.appspotmail.com
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Acked-by: Marco Elver <elver@google.com>
This commit is contained in:
parent
e42617b825
commit
15c7c972cd
@@ -134,7 +134,7 @@ static void __maybe_unused sync_exp_reset_tree(void)
|
|||||||
rcu_for_each_node_breadth_first(rnp) {
|
rcu_for_each_node_breadth_first(rnp) {
|
||||||
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
||||||
WARN_ON_ONCE(rnp->expmask);
|
WARN_ON_ONCE(rnp->expmask);
|
||||||
rnp->expmask = rnp->expmaskinit;
|
WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
|
||||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -211,7 +211,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
|
|||||||
rnp = rnp->parent;
|
rnp = rnp->parent;
|
||||||
raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
|
raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
|
||||||
WARN_ON_ONCE(!(rnp->expmask & mask));
|
WARN_ON_ONCE(!(rnp->expmask & mask));
|
||||||
rnp->expmask &= ~mask;
|
WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -241,7 +241,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
|
|||||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
rnp->expmask &= ~mask;
|
WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
|
||||||
__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
|
__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -372,12 +372,10 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
|
|||||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||||
|
|
||||||
/* IPI the remaining CPUs for expedited quiescent state. */
|
/* IPI the remaining CPUs for expedited quiescent state. */
|
||||||
for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
|
for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
|
||||||
unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
|
unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
|
||||||
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
||||||
|
|
||||||
if (!(mask_ofl_ipi & mask))
|
|
||||||
continue;
|
|
||||||
retry_ipi:
|
retry_ipi:
|
||||||
if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
|
if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
|
||||||
mask_ofl_test |= mask;
|
mask_ofl_test |= mask;
|
||||||
@@ -491,7 +489,7 @@ static void synchronize_sched_expedited_wait(void)
|
|||||||
struct rcu_data *rdp;
|
struct rcu_data *rdp;
|
||||||
|
|
||||||
mask = leaf_node_cpu_bit(rnp, cpu);
|
mask = leaf_node_cpu_bit(rnp, cpu);
|
||||||
if (!(rnp->expmask & mask))
|
if (!(READ_ONCE(rnp->expmask) & mask))
|
||||||
continue;
|
continue;
|
||||||
ndetected++;
|
ndetected++;
|
||||||
rdp = per_cpu_ptr(&rcu_data, cpu);
|
rdp = per_cpu_ptr(&rcu_data, cpu);
|
||||||
@@ -503,7 +501,8 @@ static void synchronize_sched_expedited_wait(void)
|
|||||||
}
|
}
|
||||||
pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
|
pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
|
||||||
jiffies - jiffies_start, rcu_state.expedited_sequence,
|
jiffies - jiffies_start, rcu_state.expedited_sequence,
|
||||||
rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
|
READ_ONCE(rnp_root->expmask),
|
||||||
|
".T"[!!rnp_root->exp_tasks]);
|
||||||
if (ndetected) {
|
if (ndetected) {
|
||||||
pr_err("blocking rcu_node structures:");
|
pr_err("blocking rcu_node structures:");
|
||||||
rcu_for_each_node_breadth_first(rnp) {
|
rcu_for_each_node_breadth_first(rnp) {
|
||||||
@@ -513,7 +512,7 @@ static void synchronize_sched_expedited_wait(void)
|
|||||||
continue;
|
continue;
|
||||||
pr_cont(" l=%u:%d-%d:%#lx/%c",
|
pr_cont(" l=%u:%d-%d:%#lx/%c",
|
||||||
rnp->level, rnp->grplo, rnp->grphi,
|
rnp->level, rnp->grplo, rnp->grphi,
|
||||||
rnp->expmask,
|
READ_ONCE(rnp->expmask),
|
||||||
".T"[!!rnp->exp_tasks]);
|
".T"[!!rnp->exp_tasks]);
|
||||||
}
|
}
|
||||||
pr_cont("\n");
|
pr_cont("\n");
|
||||||
@@ -521,7 +520,7 @@ static void synchronize_sched_expedited_wait(void)
|
|||||||
rcu_for_each_leaf_node(rnp) {
|
rcu_for_each_leaf_node(rnp) {
|
||||||
for_each_leaf_node_possible_cpu(rnp, cpu) {
|
for_each_leaf_node_possible_cpu(rnp, cpu) {
|
||||||
mask = leaf_node_cpu_bit(rnp, cpu);
|
mask = leaf_node_cpu_bit(rnp, cpu);
|
||||||
if (!(rnp->expmask & mask))
|
if (!(READ_ONCE(rnp->expmask) & mask))
|
||||||
continue;
|
continue;
|
||||||
dump_cpu_task(cpu);
|
dump_cpu_task(cpu);
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user