locking: Fix typos in comments

Fix ~16 single-word typos in locking code comments.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit e2db7592be
parent 8c52cca04f
Author: Ingo Molnar
Date:   2021-03-22 02:35:05 +01:00

11 changed files with 16 additions and 16 deletions

@@ -22,7 +22,7 @@
  * assembler to insert a extra (16-bit) IT instruction, depending on the
  * presence or absence of neighbouring conditional instructions.
  *
- * To avoid this unpredictableness, an approprite IT is inserted explicitly:
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
  * the assembler won't change IT instructions which are explicitly present
  * in the input.
  */

@@ -155,7 +155,7 @@ extern void lockdep_set_selftest_task(struct task_struct *task);
 extern void lockdep_init_task(struct task_struct *task);
 
 /*
- * Split the recrursion counter in two to readily detect 'off' vs recursion.
+ * Split the recursion counter in two to readily detect 'off' vs recursion.
  */
 #define LOCKDEP_RECURSION_BITS 16
 #define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS)
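The split works out to: the high half of the counter records "lockdep is switched off", the low half records recursion depth, so either condition is a cheap mask test. The helpers below are illustrative only, not lockdep's real accessors.

#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/* hypothetical helpers, not lockdep's API */
static inline bool example_lockdep_is_off(unsigned int recursion)
{
	return recursion & ~LOCKDEP_RECURSION_MASK;	/* lockdep_off() active */
}

static inline unsigned int example_lockdep_depth(unsigned int recursion)
{
	return recursion & LOCKDEP_RECURSION_MASK;	/* current recursion depth */
}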

@@ -110,7 +110,7 @@ do { \
 
 /*
  * This is the same regardless of which rwsem implementation that is being used.
- * It is just a heuristic meant to be called by somebody alreadying holding the
+ * It is just a heuristic meant to be called by somebody already holding the
  * rwsem to see if somebody from an incompatible type is wanting access to the
  * lock.
  */
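This heuristic is what rwsem_is_contended() exposes; a typical caller looks roughly like the sketch below. The example_* helpers are hypothetical, and the pattern (a reader periodically yielding to a queued writer) is only one plausible use, not code from this patch.

#include <linux/rwsem.h>
#include <linux/sched.h>

static bool example_more_work(void);		/* hypothetical */
static void example_process_one_item(void);	/* hypothetical */

static void example_long_read_scan(struct rw_semaphore *sem)
{
	down_read(sem);
	while (example_more_work()) {
		example_process_one_item();

		if (rwsem_is_contended(sem)) {
			/* a waiter of an incompatible type (e.g. a writer) is queued */
			up_read(sem);
			cond_resched();
			down_read(sem);
		}
	}
	up_read(sem);
}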

@@ -1747,7 +1747,7 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
 
 /*
  * Step 4: if not match, expand the path by adding the
- * forward or backwards dependencis in the search
+ * forward or backwards dependencies in the search
  *
  */
 first = true;
@@ -1916,7 +1916,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
  * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
  * dependency graph, as any strong path ..-> A -> B ->.. we can get with
  * having dependency A -> B, we could already get a equivalent path ..-> A ->
- * .. -> B -> .. with A -> .. -> B. Therefore A -> B is reduntant.
+ * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant.
  *
  * We need to make sure both the start and the end of A -> .. -> B is not
  * weaker than A -> B. For the start part, please see the comment in
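The redundancy rule can be read as: a new direct edge A -> B adds nothing if the graph already holds a path A -> .. -> B whose end dependencies are at least as strong as A -> B. A rough sketch of that check follows; it is illustrative only, not lockdep's actual implementation.

/*
 * Hypothetical helper: a BFS over the existing dependency graph that is
 * assumed to only follow dependencies at least as strong as A -> B.
 */
static bool example_strong_path_exists(struct lock_class *a, struct lock_class *b);

static bool example_new_dep_is_redundant(struct lock_class *a,
					 struct lock_class *b)
{
	/* skip recording A -> B when an equally strong path already exists */
	return example_strong_path_exists(a, b);
}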

@@ -348,7 +348,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 debug_locks);
 
 /*
- * Zappped classes and lockdep data buffers reuse statistics.
+ * Zapped classes and lockdep data buffers reuse statistics.
  */
 seq_puts(m, "\n");
 seq_printf(m, " zapped classes: %11lu\n",

@@ -7,7 +7,7 @@
  * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
  * with the desirable properties of being fair, and with each cpu trying
  * to acquire the lock spinning on a local variable.
- * It avoids expensive cache bouncings that common test-and-set spin-lock
+ * It avoids expensive cache bounces that common test-and-set spin-lock
  * implementations incur.
  */
 #ifndef __LINUX_MCS_SPINLOCK_H
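The properties named in this comment (FIFO fairness, spinning on a local variable) come from the queueing scheme sketched below. This is a minimal illustration of the MCS idea, not the kernel's mcs_spinlock.h, which among other things parks waiters through arch-specific helpers.

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/processor.h>	/* cpu_relax() */

struct example_mcs_node {
	struct example_mcs_node *next;
	int locked;
};

static void example_mcs_lock(struct example_mcs_node **lock,
			     struct example_mcs_node *node)
{
	struct example_mcs_node *prev;

	node->next = NULL;
	node->locked = 0;

	prev = xchg(lock, node);		/* atomically become the new queue tail */
	if (!prev)
		return;				/* queue was empty: lock is ours */

	WRITE_ONCE(prev->next, node);		/* link behind the previous waiter */
	while (!smp_load_acquire(&node->locked))
		cpu_relax();			/* spin only on our own node */
}

static void example_mcs_unlock(struct example_mcs_node **lock,
			       struct example_mcs_node *node)
{
	struct example_mcs_node *next = READ_ONCE(node->next);

	if (!next) {
		/* no successor visible: try to reset the tail to empty */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* a successor is enqueueing; wait for it to link in */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}
	smp_store_release(&next->locked, 1);	/* pass the lock along */
}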

@@ -92,7 +92,7 @@ static inline unsigned long __owner_flags(unsigned long owner)
 }
 
 /*
- * Trylock variant that retuns the owning task on failure.
+ * Trylock variant that returns the owning task on failure.
  */
 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 {
@@ -207,7 +207,7 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 /*
  * Give up ownership to a specific task, when @task = NULL, this is equivalent
- * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
+ * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
  * WAITERS. Provides RELEASE semantics like a regular unlock, the
  * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
  */
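The PICKUP, HANDOFF and WAITERS bits referred to here live in the low bits of the mutex owner word. The encoding below is an approximation in the spirit of mutex.c (names, values and helpers are illustrative, not quoted from this patch): task_struct pointers are well aligned, so the bottom bits are free to carry state.

struct task_struct;

#define EXAMPLE_FLAG_WAITERS	0x01	/* waiters are queued on the mutex */
#define EXAMPLE_FLAG_HANDOFF	0x02	/* unlock should hand off to the top waiter */
#define EXAMPLE_FLAG_PICKUP	0x04	/* handoff done, the waiter may take the lock */
#define EXAMPLE_FLAGS		0x07

static inline struct task_struct *example_owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~EXAMPLE_FLAGS);
}

static inline unsigned long example_owner_flags(unsigned long owner)
{
	return owner & EXAMPLE_FLAGS;
}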

@@ -135,7 +135,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
  */
 
 /*
- * Wait to acquire the lock or cancelation. Note that need_resched()
+ * Wait to acquire the lock or cancellation. Note that need_resched()
  * will come with an IPI, which will wake smp_cond_load_relaxed() if it
  * is implemented with a monitor-wait. vcpu_is_preempted() relies on
  * polling, be careful.
@@ -164,7 +164,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 
 /*
  * We can only fail the cmpxchg() racing against an unlock(),
- * in which case we should observe @node->locked becomming
+ * in which case we should observe @node->locked becoming
  * true.
  */
 if (smp_load_acquire(&node->locked))
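The wait-or-cancel step documented by the first comment above has roughly the following shape. This is a sketch, not the exact code in osq_lock.c, and it omits the unqueue path that runs on cancellation.

#include <linux/atomic.h>
#include <linux/osq_lock.h>
#include <linux/sched.h>

static bool example_osq_wait(struct optimistic_spin_node *node, int prev_cpu)
{
	/*
	 * Spin until the predecessor sets node->locked, but bail out if
	 * this task needs to reschedule or the predecessor's vCPU was
	 * preempted; need_resched() arrives with an IPI, which also wakes
	 * a monitor-wait based smp_cond_load_relaxed().
	 */
	if (smp_cond_load_relaxed(&node->locked,
				  VAL || need_resched() ||
				  vcpu_is_preempted(prev_cpu)))
		return true;	/* node->locked became non-zero: lock acquired */

	return false;		/* cancelled: the caller must unqueue itself */
}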

@@ -706,7 +706,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 } else if (prerequeue_top_waiter == waiter) {
 /*
  * The waiter was the top waiter on the lock, but is
- * no longer the top prority waiter. Replace waiter in
+ * no longer the top priority waiter. Replace waiter in
  * the owner tasks pi waiters tree with the new top
  * (highest priority) waiter and adjust the priority
  * of the owner.
@@ -1194,7 +1194,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
 return;
 
 /*
- * Yell lowdly and stop the task right here.
+ * Yell loudly and stop the task right here.
  */
 rt_mutex_print_deadlock(w);
 while (1) {
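The "stop the task right here" part amounts to parking the task forever, along the lines of the sketch below (close in spirit to rt_mutex_handle_deadlock(), not quoted from it).

#include <linux/sched.h>

static void example_park_deadlocked_task(void)
{
	/* never returns: keep the task blocked so the deadlock stays inspectable */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}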

@@ -819,7 +819,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
  * we try to get it. The new owner may be a spinnable
  * writer.
  *
- * To take advantage of two scenarios listed agove, the RT
+ * To take advantage of two scenarios listed above, the RT
  * task is made to retry one more time to see if it can
  * acquire the lock or continue spinning on the new owning
  * writer. Of course, if the time lag is long enough or the

@@ -58,10 +58,10 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
 /*
  * We build the __lock_function inlines here. They are too large for
  * inlining all over the place, but here is only one user per function
- * which embedds them into the calling _lock_function below.
+ * which embeds them into the calling _lock_function below.
  *
  * This could be a long-held lock. We both prepare to spin for a long
- * time (making _this_ CPU preemptable if possible), and we also signal
+ * time (making _this_ CPU preemptible if possible), and we also signal
  * towards that other CPU that it should break the lock ASAP.
  */
 #define BUILD_LOCK_OPS(op, locktype) \
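One function generated by this macro has roughly the shape below. This is a simplified sketch, not the actual BUILD_LOCK_OPS() expansion: it shows the trylock-with-preemption-disabled / wait-with-preemption-enabled pattern and leaves out the lock-break signalling the comment also mentions.

#include <linux/preempt.h>
#include <linux/spinlock.h>

static void example_generated_spin_lock(raw_spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (do_raw_spin_trylock(lock))
			break;			/* acquired: return with preemption disabled */
		preempt_enable();		/* keep this CPU preemptible while waiting */

		while (raw_spin_is_locked(lock))
			cpu_relax();		/* wait politely before retrying */
	}
}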