Mirror of https://github.com/torvalds/linux.git
Commit 2cf12f821c: lib/llist.c
llist_del_first reads entry->next, but it does not acquire visibility over
the entry node. As a result it can get a stale value of entry->next (e.g.
NULL, or whatever garbage was there before the appending thread wrote the
correct value), and then commit that value as the llist head with cmpxchg.
That will corrupt the llist.

Note there is a control dependency between the read of head->first and the
read of entry->next, but it does not make the code correct. The kernel
memory model unambiguously says: "A load-load control dependency requires
a full read memory barrier".

Use smp_load_acquire to acquire visibility over the entry node.

The data race was found with KernelThreadSanitizer (KTSAN). Here is an
example of a KTSAN report:

ThreadSanitizer: data-race in llist_del_first

Read of size 1 by thread T389 (K2630, CPU0):
 [<ffffffff8156b8a9>] llist_del_first+0x39/0x70 lib/llist.c:74
 [< inlined >] tty_buffer_alloc drivers/tty/tty_buffer.c:181
 [<ffffffff81664af4>] __tty_buffer_request_room+0xb4/0x250 drivers/tty/tty_buffer.c:292
 [<ffffffff81664e6c>] tty_insert_flip_string_fixed_flag+0x6c/0x150 drivers/tty/tty_buffer.c:337
 [< inlined >] tty_insert_flip_string include/linux/tty_flip.h:35
 [<ffffffff81667422>] pty_write+0x72/0xc0 drivers/tty/pty.c:110
 [< inlined >] process_output_block drivers/tty/n_tty.c:611
 [<ffffffff8165c016>] n_tty_write+0x346/0x7f0 drivers/tty/n_tty.c:2401
 [< inlined >] do_tty_write drivers/tty/tty_io.c:1159
 [<ffffffff816568df>] tty_write+0x21f/0x3f0 drivers/tty/tty_io.c:1245
 [<ffffffff8125f00f>] __vfs_write+0x5f/0x1f0 fs/read_write.c:489
 [<ffffffff8125ff8f>] vfs_write+0xef/0x280 fs/read_write.c:538
 [< inlined >] SYSC_write fs/read_write.c:585
 [<ffffffff81261390>] SyS_write+0x70/0xe0 fs/read_write.c:577
 [<ffffffff81ee862e>] entry_SYSCALL_64_fastpath+0x12/0x71 arch/x86/entry/entry_64.S:186

Previous write of size 8 by thread T226 (K761, CPU0):
 [<ffffffff8156b832>] llist_add_batch+0x32/0x70 lib/llist.c:44 (discriminator 16)
 [< inlined >] llist_add include/linux/llist.h:180
 [<ffffffff816649fc>] tty_buffer_free+0x6c/0xb0 drivers/tty/tty_buffer.c:221
 [<ffffffff816651e7>] flush_to_ldisc+0x107/0x300 drivers/tty/tty_buffer.c:514
 [<ffffffff810b20ee>] process_one_work+0x47e/0x930 kernel/workqueue.c:2036
 [<ffffffff810b2650>] worker_thread+0xb0/0x900 kernel/workqueue.c:2170
 [<ffffffff810bbe20>] kthread+0x150/0x170 kernel/kthread.c:209
 [<ffffffff81ee8a1f>] ret_from_fork+0x3f/0x70 arch/x86/entry/entry_64.S:526

Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
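The ordering requirement is easier to see in isolation. Below is a minimal user-space C11 analogue of the pattern (an editor's sketch, not kernel code: struct node, struct lhead, push() and pop() are invented names). The producer publishes a node with release semantics, standing in for the full barrier the kernel's cmpxchg() provides, and the single consumer must load the head with acquire semantics before dereferencing ->next, which is what the smp_load_acquire() added by this patch does.

#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *next;
};

struct lhead {
	_Atomic(struct node *) first;
};

/* Producer: initialise n->next, then publish n. The release ordering on
 * the successful CAS makes the store to n->next visible to anyone who
 * later reads the new head with acquire semantics. */
static void push(struct lhead *h, struct node *n)
{
	struct node *old = atomic_load_explicit(&h->first, memory_order_relaxed);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak_explicit(&h->first, &old, n,
							memory_order_release,
							memory_order_relaxed));
}

/* Single consumer: the acquire load of h->first guarantees that the read
 * of n->next below sees the value stored before publication. With a plain
 * load of the head (as before this patch), n->next could be stale, and the
 * CAS would then install garbage as the list head. */
static struct node *pop(struct lhead *h)
{
	struct node *n = atomic_load_explicit(&h->first, memory_order_acquire);

	while (n) {
		struct node *next = n->next;

		if (atomic_compare_exchange_weak_explicit(&h->first, &n, next,
							  memory_order_acquire,
							  memory_order_acquire))
			return n;
		/* CAS failure reloaded n (with acquire ordering); retry. */
	}
	return NULL;
}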
lib/llist.c (105 lines, 3.1 KiB, C)
/*
 * Lock-less NULL terminated single linked list
 *
 * The basic atomic operation of this list is cmpxchg on long. On
 * architectures that don't have NMI-safe cmpxchg implementation, the
 * list can NOT be used in NMI handlers. So code that uses the list in
 * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2010,2011 Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/llist.h>

/**
 * llist_add_batch - add several linked entries in batch
 * @new_first:	first entry in batch to be added
 * @new_last:	last entry in batch to be added
 * @head:	the head for your lock-less list
 *
 * Return whether list is empty before adding.
 */
bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
		     struct llist_head *head)
{
	struct llist_node *first;

	do {
		new_last->next = first = ACCESS_ONCE(head->first);
	} while (cmpxchg(&head->first, first, new_first) != first);

	return !first;
}
EXPORT_SYMBOL_GPL(llist_add_batch);
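
/*
 * Editor's note (illustrative, not part of the original file): adding a
 * single entry is simply a batch of one, passing the same node as both
 * @new_first and @new_last. This is how the llist_add() helper in
 * include/linux/llist.h appears to be implemented:
 *
 *	llist_add_batch(new, new, head);
 *
 * On success the cmpxchg() above implies a full memory barrier, which is
 * the producer-side ordering that the smp_load_acquire() in
 * llist_del_first() below relies on.
 */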

/**
 * llist_del_first - delete the first entry of lock-less list
 * @head:	the head for your lock-less list
 *
 * If the list is empty, return NULL; otherwise return the first entry
 * deleted, which is the most recently added one.
 *
 * Only one llist_del_first user may run at a time alongside multiple
 * llist_add users without a lock, because otherwise a llist_del_first,
 * llist_add, llist_add (or llist_del_all, llist_add, llist_add) sequence
 * in another user may change @head->first->next while keeping
 * @head->first unchanged. If multiple consumers are needed, please use
 * llist_del_all or use a lock between consumers.
 */
struct llist_node *llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *old_entry, *next;

	/*
	 * The acquire load pairs with the full barrier implied by cmpxchg()
	 * in llist_add_batch(): it guarantees that the entry->next read
	 * below sees the value stored before the entry was published, not
	 * a stale value (see the data race described in the commit message).
	 */
	entry = smp_load_acquire(&head->first);
	for (;;) {
		if (entry == NULL)
			return NULL;
		old_entry = entry;
		next = READ_ONCE(entry->next);
		entry = cmpxchg(&head->first, old_entry, next);
		if (entry == old_entry)
			break;
	}

	return entry;
}
EXPORT_SYMBOL_GPL(llist_del_first);
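
/*
 * Editor's note (illustrative, not part of the original file): the
 * single-consumer rule above is commonly honoured by serialising just the
 * consumer side while producers stay lock-less, roughly (the consumer_lock
 * and some_head names are hypothetical):
 *
 *	spin_lock(&consumer_lock);
 *	node = llist_del_first(&some_head);
 *	spin_unlock(&consumer_lock);
 *
 * Producers may keep calling llist_add(new, &some_head) concurrently
 * without taking consumer_lock.
 */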

/**
 * llist_reverse_order - reverse order of a llist chain
 * @head:	first item of the list to be reversed
 *
 * Reverse the order of a chain of llist entries and return the
 * new first entry.
 */
struct llist_node *llist_reverse_order(struct llist_node *head)
{
	struct llist_node *new_head = NULL;

	while (head) {
		struct llist_node *tmp = head;
		head = head->next;
		tmp->next = new_head;
		new_head = tmp;
	}

	return new_head;
}
EXPORT_SYMBOL_GPL(llist_reverse_order);
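
A typical way to combine the helpers above: producers push entries with llist_add() from any context, and a single consumer detaches the whole chain with llist_del_all() and reverses it with llist_reverse_order() so entries come out oldest-first. The sketch below is an editor's illustration rather than code from this commit; struct item, my_list, producer_add() and consumer_drain() are invented names, and it assumes the items were allocated with kmalloc().

#include <linux/llist.h>
#include <linux/slab.h>

struct item {
	int payload;
	struct llist_node node;
};

static LLIST_HEAD(my_list);

/* Producer side: safe to call concurrently from many CPUs. */
static void producer_add(struct item *it)
{
	llist_add(&it->node, &my_list);
}

/* Consumer side: grab the whole chain at once, then walk it oldest-first. */
static void consumer_drain(void)
{
	struct llist_node *batch, *pos, *n;

	batch = llist_del_all(&my_list);	/* detach every entry atomically */
	batch = llist_reverse_order(batch);	/* newest-first -> oldest-first */

	llist_for_each_safe(pos, n, batch) {
		struct item *it = llist_entry(pos, struct item, node);

		/* Handle it->payload here, then release the item. */
		kfree(it);
	}
}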