forked from Minki/linux
fb6a44f33b
Without using WRITE_ONCE(), the compiler can potentially break a write into multiple smaller ones (store tearing). So a read from the same data by another task concurrently may return a partial result. This can result in a kernel crash if the data is a memory address that is being dereferenced. This patch changes all write to rwsem->owner to use WRITE_ONCE() to make sure that store tearing will not happen. READ_ONCE() may not be needed for rwsem->owner as long as the value is only used for comparison and not dereferencing. Signed-off-by: Waiman Long <Waiman.Long@hpe.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Dave Chinner <david@fromorbit.com> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Douglas Hatch <doug.hatch@hpe.com> Cc: Jason Low <jason.low2@hp.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Hurley <peter@hurleysoftware.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Scott J Norton <scott.norton@hpe.com> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1463534783-38814-3-git-send-email-Waiman.Long@hpe.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
69 lines
1.9 KiB
C
69 lines
1.9 KiB
C
/*
|
|
* The owner field of the rw_semaphore structure will be set to
|
|
* RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear
|
|
* the owner field when it unlocks. A reader, on the other hand, will
|
|
* not touch the owner field when it unlocks.
|
|
*
|
|
* In essence, the owner field now has the following 3 states:
|
|
* 1) 0
|
|
* - lock is free or the owner hasn't set the field yet
|
|
* 2) RWSEM_READER_OWNED
|
|
* - lock is currently or previously owned by readers (lock is free
|
|
* or not set by owner yet)
|
|
* 3) Other non-zero value
|
|
* - a writer owns the lock
|
|
*/
|
|
#define RWSEM_READER_OWNED ((struct task_struct *)1UL)
|
|
|
|
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
|
|
/*
|
|
* All writes to owner are protected by WRITE_ONCE() to make sure that
|
|
* store tearing can't happen as optimistic spinners may read and use
|
|
* the owner value concurrently without lock. Read from owner, however,
|
|
* may not need READ_ONCE() as long as the pointer value is only used
|
|
* for comparison and isn't being dereferenced.
|
|
*/
|
|
/*
 * Record the current task as the owning writer on write-lock acquisition.
 * WRITE_ONCE() prevents store tearing: optimistic spinners may read and
 * dereference ->owner concurrently without holding the lock.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, current);
}
|
|
|
|
/*
 * Clear ->owner on writer unlock. WRITE_ONCE() keeps the store from being
 * torn while concurrent spinners read the field locklessly.
 */
static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, NULL);
}
|
|
|
|
/*
 * Mark the lock as reader-owned. Readers never clear this on unlock, so
 * the value persists until the next writer overwrites it (see the state
 * description at the top of this file).
 */
static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	/*
	 * We check the owner value first to make sure that we will only
	 * do a write to the rwsem cacheline when it is really necessary
	 * to minimize cacheline contention.
	 *
	 * The check-then-store is racy between readers, but every racing
	 * reader stores the identical RWSEM_READER_OWNED constant, so a
	 * lost update is harmless.
	 */
	if (sem->owner != RWSEM_READER_OWNED)
		WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
}
|
|
|
|
static inline bool rwsem_owner_is_writer(struct task_struct *owner)
|
|
{
|
|
return owner && owner != RWSEM_READER_OWNED;
|
|
}
|
|
|
|
/*
 * True when ->owner carries the RWSEM_READER_OWNED sentinel, i.e. the
 * lock is currently or was last owned by readers.
 */
static inline bool rwsem_owner_is_reader(struct task_struct *owner)
{
	return owner == RWSEM_READER_OWNED;
}
|
|
#else
/*
 * Without CONFIG_RWSEM_SPIN_ON_OWNER there are no optimistic spinners
 * reading ->owner, so owner tracking serves no purpose; these helpers
 * are empty and compile away entirely.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
}
#endif
|