arch: Mass conversion of smp_mb__*()
Mostly scripted conversion of the smp_mb__* barriers.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-55dhyhocezdw1dg7u19hmh1u@git.kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 4e857c58ef
parent 1b15611e1c
committed by Ingo Molnar
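The conversion collapses the operation-specific barrier names
(smp_mb__before_atomic_inc(), smp_mb__after_atomic_dec(),
smp_mb__before/after_clear_bit(), ...) into the two generic forms
smp_mb__before_atomic() and smp_mb__after_atomic(). A minimal sketch of
the pattern in kernel-style C follows; the counter and function are
hypothetical, only the barrier and atomic calls are the kernel's:

#include <linux/atomic.h>

static atomic_t ready_count = ATOMIC_INIT(0);	/* hypothetical counter */

static void publish_ready(void)
{
	/*
	 * Old spelling, removed by this commit:
	 *	smp_mb__before_atomic_inc();
	 * New spelling, one name for any atomic RMW that follows:
	 */
	smp_mb__before_atomic();	/* order earlier stores before the inc */
	atomic_inc(&ready_count);
}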
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -165,7 +165,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 		 * do a write memory barrier, and then update the count, to
 		 * make sure the vector is visible when count is set.
 		 */
-		smp_mb__before_atomic_inc();
+		smp_mb__before_atomic();
 		atomic_inc(&(vec)->count);
 		do_mb = 1;
 	}
@@ -185,14 +185,14 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 		 * the new priority vec.
 		 */
 		if (do_mb)
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 
 		/*
 		 * When removing from the vector, we decrement the counter first
 		 * do a memory barrier and then clear the mask.
 		 */
 		atomic_dec(&(vec)->count);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		cpumask_clear_cpu(cpu, vec->mask);
 	}
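For context on what the write-side barriers in cpupri_set() pair with,
here is a simplified reader sketch, loosely modeled on cpupri_find();
vec_has_cpu() is hypothetical and struct cpupri_vec mirrors the
definition in kernel/sched/cpupri.h:

#include <linux/atomic.h>
#include <linux/cpumask.h>

struct cpupri_vec {
	atomic_t	count;
	cpumask_var_t	mask;
};

static int vec_has_cpu(struct cpupri_vec *vec, int cpu)
{
	if (!atomic_read(&vec->count))
		return 0;
	/*
	 * Pairs with smp_mb__before_atomic() + atomic_inc() on the
	 * write side: once a non-zero count is observed, the mask
	 * update that preceded it must be visible as well.
	 */
	smp_rmb();
	return cpumask_test_cpu(cpu, vec->mask);
}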
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -394,7 +394,7 @@ EXPORT_SYMBOL(__wake_up_bit);
  *
  * In order for this to function properly, as it uses waitqueue_active()
  * internally, some kind of memory barrier must be done prior to calling
- * this. Typically, this will be smp_mb__after_clear_bit(), but in some
+ * this. Typically, this will be smp_mb__after_atomic(), but in some
  * cases where bitflags are manipulated non-atomically under a lock, one
  * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
  * because spin_unlock() does not guarantee a memory barrier.
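The calling sequence that comment prescribes, sketched as a hypothetical
user of wake_up_bit() (BIT_IN_FLIGHT and finish_io() are illustrative,
not kernel code):

#include <linux/wait.h>
#include <linux/bitops.h>

#define BIT_IN_FLIGHT	0	/* hypothetical bit number */

static void finish_io(unsigned long *flags)
{
	clear_bit(BIT_IN_FLIGHT, flags);
	/*
	 * Order the clear before the waitqueue_active() check inside
	 * __wake_up_bit(); without it a waiter could miss the wakeup.
	 */
	smp_mb__after_atomic();
	wake_up_bit(flags, BIT_IN_FLIGHT);
}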