locking/rwsem: Better collate rwsem_read_trylock()

All users of rwsem_read_trylock() do rwsem_set_reader_owned(sem) on
success; move it into rwsem_read_trylock() proper.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20201207090243.GE3040@hirez.programming.kicks-ass.net
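
For illustration, a minimal userspace sketch of the resulting shape (not
kernel code: the toy_* names and the simplified bit layout are invented for
this example; the real constants and helpers live in kernel/locking/rwsem.c).
The trylock now does the reader-owned bookkeeping itself, so callers lose
their else branch:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define TOY_READER_BIAS      (1L << 8)        /* each reader adds this to count */
#define TOY_READ_FAILED_MASK ((1L << 8) - 1)  /* writer/waiter bits, simplified */

struct toy_rwsem {
	atomic_long count;
	atomic_long reader_owned;   /* debug-style ownership flag */
};

static bool toy_read_trylock(struct toy_rwsem *sem)
{
	/* fetch_add returns the old value; adding the bias back mirrors
	 * the kernel's atomic_long_add_return_acquire() */
	long cnt = atomic_fetch_add_explicit(&sem->count, TOY_READER_BIAS,
					     memory_order_acquire) + TOY_READER_BIAS;

	if (!(cnt & TOY_READ_FAILED_MASK)) {
		/* success path marks reader ownership here, once for all callers */
		atomic_store_explicit(&sem->reader_owned, 1, memory_order_relaxed);
		return true;
	}
	return false;
}

/* Callers need no else branch -- compare __down_read() in the diff below. */
static void toy_down_read(struct toy_rwsem *sem)
{
	if (!toy_read_trylock(sem))
		assert(0 && "the kernel would take the slowpath here");
}

int main(void)
{
	struct toy_rwsem sem = { 0 };

	toy_down_read(&sem);
	assert(atomic_load(&sem.reader_owned) == 1); /* set by the trylock itself */
	return 0;
}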
Author: Peter Zijlstra <peterz@infradead.org>
Date:   2020-12-08 10:22:16 +01:00
parent 2b3c99ee63
commit 3379116a0c

kernel/locking/rwsem.c

@@ -273,9 +273,16 @@ static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
 static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
 {
 	long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
+
 	if (WARN_ON_ONCE(cnt < 0))
 		rwsem_set_nonspinnable(sem);
-	return !(cnt & RWSEM_READ_FAILED_MASK);
+
+	if (!(cnt & RWSEM_READ_FAILED_MASK)) {
+		rwsem_set_reader_owned(sem);
+		return true;
+	}
+
+	return false;
 }
 
 /*
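
As a companion to the sketch above (same invented toy_* conventions, with a
simplified stand-in for the writer-held bit assumed here): when the fastpath
fails, the ownership bookkeeping is deliberately left untouched; in the
kernel the slowpath takes over from there, as the DEBUG_RWSEMS_WARN_ON
checks in the callers below verify.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define TOY_WRITER_LOCKED    (1L << 0)        /* simplified writer-held bit */
#define TOY_READER_BIAS      (1L << 8)
#define TOY_READ_FAILED_MASK ((1L << 8) - 1)

static atomic_long count;
static atomic_long reader_owned;

static bool toy_read_trylock(void)
{
	long cnt = atomic_fetch_add_explicit(&count, TOY_READER_BIAS,
					     memory_order_acquire) + TOY_READER_BIAS;

	if (!(cnt & TOY_READ_FAILED_MASK)) {
		atomic_store_explicit(&reader_owned, 1, memory_order_relaxed);
		return true;
	}
	return false;    /* failure leaves ownership alone for the slowpath */
}

int main(void)
{
	atomic_store(&count, TOY_WRITER_LOCKED);  /* pretend a writer holds it */

	assert(!toy_read_trylock());              /* reader fastpath fails... */
	assert(atomic_load(&reader_owned) == 0);  /* ...without claiming ownership */
	return 0;
}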
@@ -1340,8 +1347,6 @@ static inline void __down_read(struct rw_semaphore *sem)
 	if (!rwsem_read_trylock(sem)) {
 		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
-	} else {
-		rwsem_set_reader_owned(sem);
 	}
 }
 
@@ -1351,8 +1356,6 @@ static inline int __down_read_interruptible(struct rw_semaphore *sem)
 		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
 			return -EINTR;
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
-	} else {
-		rwsem_set_reader_owned(sem);
 	}
 	return 0;
 }
@@ -1363,8 +1366,6 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
 		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
 			return -EINTR;
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
-	} else {
-		rwsem_set_reader_owned(sem);
 	}
 	return 0;
 }