percpu: READ_ONCE() now implies smp_read_barrier_depends()
Because READ_ONCE() now implies smp_read_barrier_depends(), this commit
removes the now-redundant smp_read_barrier_depends() following the
READ_ONCE() in __ref_is_percpu().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
parent 7088efa913
commit b393e8b33e
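For background, the ordering guarantee at stake is the classic pointer-publication pairing. The sketch below is illustrative only and not taken from the patch; struct foo, gp, init_and_publish() and consume() are made-up names:

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/atomic.h>	/* smp_store_release() */

struct foo {
	int data;
};

static struct foo *gp;	/* hypothetical shared pointer */

/* Writer side: initialize the object, then publish it with a release store. */
static void init_and_publish(struct foo *p)
{
	p->data = 42;
	smp_store_release(&gp, p);	/* orders the init before the publish */
}

/*
 * Reader side: READ_ONCE() implies smp_read_barrier_depends(), so the
 * dependent load of p->data is ordered after the pointer fetch even on
 * Alpha; no explicit barrier is needed in between.
 */
static int consume(void)
{
	struct foo *p = READ_ONCE(gp);

	return p ? p->data : -1;
}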
@@ -139,12 +139,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
 	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
 	 * between contaminating the pointer value, meaning that
 	 * READ_ONCE() is required when fetching it.
+	 *
+	 * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
 	 */
 	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
 
-	/* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
-	smp_read_barrier_depends();
-
 	/*
 	 * Theoretically, the following could test just ATOMIC; however,
 	 * then we'd have to mask off DEAD separately as DEAD may be
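Read in context, the fetch-and-test in __ref_is_percpu() ends up shaped roughly as follows. This is a sketch assembled from the hunk's context lines; the lines outside the hunk are reconstructed and may differ in detail from the tree:

static inline bool __ref_is_percpu(struct percpu_ref *ref,
				   unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * READ_ONCE() alone now suffices; it pairs with the
	 * smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/*
	 * The mode flags live in the low bits of the pointer value, so
	 * one test covers both flags (DEAD implies ATOMIC anyway).
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}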
@@ -197,10 +197,10 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
 
 	/*
-	 * Restore per-cpu operation. smp_store_release() is paired with
-	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
-	 * that the zeroing is visible to all percpu accesses which can see
-	 * the following __PERCPU_REF_ATOMIC clearing.
+	 * Restore per-cpu operation. smp_store_release() is paired
+	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
+	 * zeroing is visible to all percpu accesses which can see the
+	 * following __PERCPU_REF_ATOMIC clearing.
 	 */
 	for_each_possible_cpu(cpu)
 		*per_cpu_ptr(percpu_count, cpu) = 0;
 
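The smp_store_release() that the updated comment refers to sits just past the end of this hunk. A sketch of that tail, reconstructed from context rather than copied from the patch:

	/* Zero the percpu counters while readers still see ATOMIC mode... */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	/*
	 * ...then clear __PERCPU_REF_ATOMIC with a release store: any
	 * reader whose READ_ONCE() sees the cleared flag is also
	 * guaranteed to see the zeroed counters.
	 */
	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);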