rcu/nocb: Make shrinker iterate only over NOCB CPUs

Callbacks can be queued as lazy only on NOCB CPUs, so iterating over
the NOCB mask is sufficient for both counting and scanning. Also take
the (mostly uncontended) barrier mutex on the counting side in order
to keep rcu_nocb_mask stable against concurrent (de-)offloading.
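
As a rough illustration of the pattern this commit relies on, here is a
minimal userspace sketch (plain C with pthreads, not kernel code; the
mask, per-CPU counts, and all names are made-up placeholders): a
non-blocking trylock pins the mask while only the CPUs set in it are
visited, mirroring how lazy_rcu_shrink_count() takes
rcu_state.barrier_mutex before walking rcu_nocb_mask.

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_mutex_t mask_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nocb_mask = 0x5;			/* toy mask: CPUs 0 and 2 */
static unsigned long lazy_len[NR_CPUS] = { 3, 0, 7, 2 };	/* toy per-CPU counts */

static unsigned long shrink_count(void)
{
	unsigned long count = 0;
	int cpu;

	/* Non-blocking: bail out rather than sleep, as a shrinker must. */
	if (pthread_mutex_trylock(&mask_mutex))
		return 0;

	/* Visit only the CPUs set in the now-stable mask. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (nocb_mask & (1UL << cpu))
			count += lazy_len[cpu];

	pthread_mutex_unlock(&mask_mutex);
	return count;
}

int main(void)
{
	printf("lazy callbacks pending: %lu\n", shrink_count());
	return 0;
}

The trylock matters because shrinker callbacks run under memory
pressure and must not sleep; returning 0 simply reports nothing to
reclaim this time around.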

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Author:    Frederic Weisbecker, 2023-03-29 18:02:03 +02:00
Committer: Paul E. McKenney
Parent:    b96a8b0b5b
Commit:    fbde57d2d2


@@ -1319,13 +1319,22 @@ lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	int cpu;
 	unsigned long count = 0;
 
+	if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
+		return 0;
+
+	/* Protect rcu_nocb_mask against concurrent (de-)offloading. */
+	if (!mutex_trylock(&rcu_state.barrier_mutex))
+		return 0;
+
 	/* Snapshot count of all CPUs */
-	for_each_possible_cpu(cpu) {
+	for_each_cpu(cpu, rcu_nocb_mask) {
 		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
 		count += READ_ONCE(rdp->lazy_len);
 	}
 
+	mutex_unlock(&rcu_state.barrier_mutex);
+
 	return count ? count : SHRINK_EMPTY;
 }
 
@@ -1336,6 +1345,8 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	unsigned long flags;
 	unsigned long count = 0;
 
+	if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
+		return 0;
 	/*
	 * Protect against concurrent (de-)offloading. Otherwise nocb locking
	 * may be ignored or imbalanced.
@@ -1351,11 +1362,11 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	}
 
 	/* Snapshot count of all CPUs */
-	for_each_possible_cpu(cpu) {
+	for_each_cpu(cpu, rcu_nocb_mask) {
 		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 		int _count;
 
-		if (!rcu_rdp_is_offloaded(rdp))
+		if (WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp)))
 			continue;
 
 		if (!READ_ONCE(rdp->lazy_len))
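
For context beyond this diff: these two functions are the
->count_objects and ->scan_objects hooks of a shrinker. In kernels of
this era the wiring in the same file looks roughly like the sketch
below (a reconstruction based on the usual pre-6.7 shrinker API, not
part of this commit; the shrinker name string is an assumption):

/* Sketch only: the exact struct in tree_nocb.h may differ. */
static struct shrinker lazy_rcu_shrinker = {
	.count_objects = lazy_rcu_shrink_count,
	.scan_objects = lazy_rcu_shrink_scan,
	.batch = 0,
	.seeks = DEFAULT_SEEKS,
};

It would then be registered once at boot, e.g. via
register_shrinker(&lazy_rcu_shrinker, "rcu-lazy"), after which the MM
core invokes the count and scan callbacks under memory pressure.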