mm/gup: prevent gup_fast from racing with COW during fork
Since commit 70e806e4e6 ("mm: Do early cow for pinned pages during fork()
for ptes") pages under a FOLL_PIN will not be write protected during COW
for fork.  This means that pages returned from pin_user_pages(FOLL_WRITE)
should not become write protected while the pin is active.

However, there is a small race where get_user_pages_fast(FOLL_PIN) can
establish a FOLL_PIN at the same time copy_present_page() is write
protecting it:

        CPU 0                             CPU 1
   get_user_pages_fast()
    internal_get_user_pages_fast()
                                          copy_page_range()
                                            pte_alloc_map_lock()
                                              copy_present_page()
                                                atomic_read(has_pinned) == 0
                                                page_maybe_dma_pinned() == false
    atomic_set(has_pinned, 1);
    gup_pgd_range()
     gup_pte_range()
      pte_t pte = gup_get_pte(ptep)
      pte_access_permitted(pte)
      try_grab_compound_head()
                                                pte = pte_wrprotect(pte)
                                                set_pte_at();
                                                pte_unmap_unlock()
    // GUP now returns with a write protected page

The first attempt to resolve this by using the write protect caused
problems (and was missing a barrier), see commit f3c64eda3e ("mm: avoid
early COW write protect games during fork()").

Instead, wrap copy_p4d_range() with the write side of a seqcount and check
the read side around gup_pgd_range().  If there is a collision then
get_user_pages_fast() fails and falls back to slow GUP.

Slow GUP is safe against this race because copy_page_range() is only
called while holding the exclusive side of the mmap_lock on the src
mm_struct.

[akpm@linux-foundation.org: coding style fixes]

Link: https://lore.kernel.org/r/CAHk-=wi=iCnYCARbPGjkVJu9eyYeZ13N64tZYLdOB8CP5Q_PLw@mail.gmail.com
Link: https://lkml.kernel.org/r/2-v4-908497cf359a+4782-gup_fork_jgg@nvidia.com
Fixes: f3c64eda3e ("mm: avoid early COW write protect games during fork()")
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: "Ahmed S. Darwish" <a.darwish@linutronix.de> [seqcount_t parts]
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kirill Shutemov <kirill@shutemov.name>
Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Leon Romanovsky <leonro@nvidia.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent c28b1fc703
commit 57efa1fe59
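The fix is an instance of the classic seqcount collision-detection pattern:
the writer makes the count odd while it write protects pages, and the
lockless reader refuses to trust its walk if it ever saw an odd count or if
the count changed underneath it. Below is a minimal, self-contained
user-space C analogue of that pattern — not kernel code: the function names
(writer_protect_pages(), reader_try_fast_path()) are illustrative, and
sequentially consistent atomics stand in for the finer-grained barriers of
the kernel's raw_read_seqcount()/read_seqcount_retry() API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint write_protect_seq;	/* zero-initialized; even = no writer active */

/* Write side: the analogue of what copy_page_range() does around the
 * page-table copy. The count is odd for the whole write-protect phase. */
static void writer_protect_pages(void)
{
	atomic_fetch_add(&write_protect_seq, 1);	/* begin: count goes odd */
	/* ... write protect the PTEs here ... */
	atomic_fetch_add(&write_protect_seq, 1);	/* end: count is even again */
}

/* Read side: the lockless_pages_from_mm() pattern. Returns false when a
 * writer was, or may have been, concurrently active, forcing the caller
 * onto the slow (mmap_lock-protected) path. */
static bool reader_try_fast_path(void)
{
	unsigned int seq = atomic_load(&write_protect_seq);

	if (seq & 1)	/* a writer is mid-update: fail immediately */
		return false;

	/* ... lockless page-table walk, taking FOLL_PIN references ... */

	if (atomic_load(&write_protect_seq) != seq)
		return false;	/* a writer ran meanwhile: drop pins, use slow GUP */

	return true;
}

int main(void)
{
	/* Single-threaded demo: no writer is active at either check, so the
	 * fast path succeeds both times; under a real race the second load
	 * would differ from the first and force the fallback. */
	printf("%s\n", reader_try_fast_path() ? "fast path ok" : "fell back");
	writer_protect_pages();
	printf("%s\n", reader_try_fast_path() ? "fast path ok" : "fell back");
	return 0;
}

Failing back to the slow path instead of spinning is what lets
copy_page_range() take the write side without disabling preemption, as the
comment added in mm/memory.c below explains.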
arch/x86/kernel/tboot.c
@@ -93,6 +93,7 @@ static struct mm_struct tboot_mm = {
 	.pgd            = swapper_pg_dir,
 	.mm_users       = ATOMIC_INIT(2),
 	.mm_count       = ATOMIC_INIT(1),
+	.write_protect_seq = SEQCNT_ZERO(tboot_mm.write_protect_seq),
 	MMAP_LOCK_INITIALIZER(init_mm)
 	.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
 	.mmlist         = LIST_HEAD_INIT(init_mm.mmlist),
drivers/firmware/efi/efi.c
@@ -57,6 +57,7 @@ struct mm_struct efi_mm = {
 	.mm_rb			= RB_ROOT,
 	.mm_users		= ATOMIC_INIT(2),
 	.mm_count		= ATOMIC_INIT(1),
+	.write_protect_seq	= SEQCNT_ZERO(efi_mm.write_protect_seq),
 	MMAP_LOCK_INITIALIZER(efi_mm)
 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
include/linux/mm_types.h
@@ -14,6 +14,7 @@
 #include <linux/uprobes.h>
 #include <linux/page-flags-layout.h>
 #include <linux/workqueue.h>
+#include <linux/seqlock.h>

 #include <asm/mmu.h>

@@ -446,6 +447,13 @@ struct mm_struct {
 		 */
 		atomic_t has_pinned;

+		/**
+		 * @write_protect_seq: Locked when any thread is write
+		 * protecting pages mapped by this mm to enforce a later COW,
+		 * for instance during page table copying for fork().
+		 */
+		seqcount_t write_protect_seq;
+
 #ifdef CONFIG_MMU
 		atomic_long_t pgtables_bytes;	/* PTE page table pages */
 #endif
kernel/fork.c
@@ -1007,6 +1007,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm->vmacache_seqnum = 0;
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
+	seqcount_init(&mm->write_protect_seq);
 	mmap_init_lock(mm);
 	INIT_LIST_HEAD(&mm->mmlist);
 	mm->core_state = NULL;
mm/gup.c
@@ -2684,11 +2684,18 @@ static unsigned long lockless_pages_from_mm(unsigned long start,
 {
 	unsigned long flags;
 	int nr_pinned = 0;
+	unsigned seq;

 	if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
 	    !gup_fast_permitted(start, end))
 		return 0;

+	if (gup_flags & FOLL_PIN) {
+		seq = raw_read_seqcount(&current->mm->write_protect_seq);
+		if (seq & 1)
+			return 0;
+	}
+
 	/*
 	 * Disable interrupts. The nested form is used, in order to allow full,
 	 * general purpose use of this routine.
@@ -2703,6 +2710,17 @@ static unsigned long lockless_pages_from_mm(unsigned long start,
 	local_irq_save(flags);
 	gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
 	local_irq_restore(flags);
+
+	/*
+	 * When pinning pages for DMA there could be a concurrent write protect
+	 * from fork() via copy_page_range(), in this case always fail fast GUP.
+	 */
+	if (gup_flags & FOLL_PIN) {
+		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
+			unpin_user_pages(pages, nr_pinned);
+			return 0;
+		}
+	}
 	return nr_pinned;
 }
mm/init-mm.c
@@ -31,6 +31,7 @@ struct mm_struct init_mm = {
 	.pgd		= swapper_pg_dir,
 	.mm_users	= ATOMIC_INIT(2),
 	.mm_count	= ATOMIC_INIT(1),
+	.write_protect_seq = SEQCNT_ZERO(init_mm.write_protect_seq),
 	MMAP_LOCK_INITIALIZER(init_mm)
 	.page_table_lock =  __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
 	.arg_lock	=  __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
mm/memory.c
@@ -1171,6 +1171,15 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 					0, src_vma, src_mm, addr, end);
 		mmu_notifier_invalidate_range_start(&range);
+		/*
+		 * Disabling preemption is not needed for the write side, as
+		 * the read side doesn't spin, but goes to the mmap_lock.
+		 *
+		 * Use the raw variant of the seqcount_t write API to avoid
+		 * lockdep complaining about preemptibility.
+		 */
+		mmap_assert_write_locked(src_mm);
+		raw_write_seqcount_begin(&src_mm->write_protect_seq);
 	}

 	ret = 0;
@@ -1187,8 +1196,10 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 		}
 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

-	if (is_cow)
+	if (is_cow) {
+		raw_write_seqcount_end(&src_mm->write_protect_seq);
 		mmu_notifier_invalidate_range_end(&range);
+	}
 	return ret;
 }