mm: introduce __vm_flags_mod and use it in untrack_pfn
There are scenarios when vm_flags can be modified without exclusive mmap_lock, such as:
- after a VMA was isolated and mmap_lock was downgraded or dropped
- in exit_mmap when there are no other mm users and locking is unnecessary

Introduce __vm_flags_mod to avoid assertions when the caller takes responsibility for the required locking. Pass a hint to untrack_pfn so it can conditionally use __vm_flags_mod for flags modification and avoid the assertion.

Link: https://lkml.kernel.org/r/20230126193752.297968-7-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@google.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Oskolkov <posk@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Sebastian Reichel <sebastian.reichel@collabora.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Soheil Hassas Yeganeh <soheil@google.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 68f48381d7
parent ff126c0ece
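Illustrative note (not part of the commit): below is a minimal userspace C model of the locking contract this patch introduces. The mock_vma struct, the mm_write_locked field, the clear_pat_flag() helper, and the flag value are stand-ins invented for this sketch; only the split between an asserting vm_flags_mod() and a non-asserting __vm_flags_mod(), and untrack_pfn()'s new mm_wr_locked hint, mirror the patch.

/*
 * Userspace model only, not kernel code.
 * vm_flags_mod() asserts that the caller holds the mmap_lock for writing,
 * while __vm_flags_mod() skips the assertion and trusts the caller.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_PAT	(1UL << 12)	/* illustrative flag value only */

struct mock_vma {
	vm_flags_t vm_flags;
	bool mm_write_locked;	/* stands in for the mmap_lock state */
};

/* Unlocked variant: caller guarantees the VMA has no other users. */
static void __vm_flags_mod(struct mock_vma *vma, vm_flags_t set, vm_flags_t clear)
{
	vma->vm_flags = (vma->vm_flags | set) & ~clear;
}

/* Locked variant: only legal while the mmap_lock is write-held. */
static void vm_flags_mod(struct mock_vma *vma, vm_flags_t set, vm_flags_t clear)
{
	assert(vma->mm_write_locked);	/* models mmap_assert_write_locked() */
	__vm_flags_mod(vma, set, clear);
}

/* Models untrack_pfn()'s new hint: pick the variant based on the caller. */
static void clear_pat_flag(struct mock_vma *vma, bool mm_wr_locked)
{
	if (mm_wr_locked)
		vm_flags_mod(vma, 0, VM_PAT);
	else
		__vm_flags_mod(vma, 0, VM_PAT);
}

int main(void)
{
	struct mock_vma vma = { .vm_flags = 0, .mm_write_locked = true };

	/* Write-locked path: the asserting variant is allowed. */
	vm_flags_mod(&vma, VM_PAT, 0);

	/* exit_mmap()-like path: lock dropped, no other users of the mm. */
	vma.mm_write_locked = false;
	clear_pat_flag(&vma, false);

	printf("VM_PAT cleared: %d\n", !(vma.vm_flags & VM_PAT));
	return 0;
}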
arch/x86/mm/pat/memtype.c

@@ -1046,7 +1046,7 @@ void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
  * can be for the entire vma (in which case pfn, size are zero).
  */
 void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
-		 unsigned long size)
+		 unsigned long size, bool mm_wr_locked)
 {
 	resource_size_t paddr;
 	unsigned long prot;
@@ -1065,8 +1065,12 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 		size = vma->vm_end - vma->vm_start;
 	}
 	free_pfn_range(paddr, size);
-	if (vma)
-		vm_flags_clear(vma, VM_PAT);
+	if (vma) {
+		if (mm_wr_locked)
+			vm_flags_clear(vma, VM_PAT);
+		else
+			__vm_flags_mod(vma, 0, VM_PAT);
+	}
 }
 
 /*
include/linux/mm.h

@@ -656,6 +656,16 @@ static inline void vm_flags_clear(struct vm_area_struct *vma,
 	ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
 }
 
+/*
+ * Use only if VMA is not part of the VMA tree or has no other users and
+ * therefore needs no locking.
+ */
+static inline void __vm_flags_mod(struct vm_area_struct *vma,
+				  vm_flags_t set, vm_flags_t clear)
+{
+	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
+}
+
 /*
  * Use only when the order of set/clear operations is unimportant, otherwise
  * use vm_flags_{set|clear} explicitly.
@@ -664,7 +674,7 @@ static inline void vm_flags_mod(struct vm_area_struct *vma,
 				vm_flags_t set, vm_flags_t clear)
 {
 	mmap_assert_write_locked(vma->vm_mm);
-	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
+	__vm_flags_mod(vma, set, clear);
 }
 
 static inline void vma_set_anonymous(struct vm_area_struct *vma)
@@ -2085,7 +2095,7 @@ static inline void zap_vma_pages(struct vm_area_struct *vma)
 }
 void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 		struct vm_area_struct *start_vma, unsigned long start,
-		unsigned long end);
+		unsigned long end, bool mm_wr_locked);
 
 struct mmu_notifier_range;
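Illustrative note (not part of the commit): the comment on the new helper above restricts it to VMAs that are not in the VMA tree or have no other users. As a purely hypothetical sketch of that rule, with an invented function name, invented flag choices, and no claim about any real caller, such a use could look like:

/* Hypothetical sketch only: vma is assumed not yet linked into the VMA tree. */
static void init_private_vma_flags(struct vm_area_struct *vma)
{
	/* No mmap_lock assertion is needed (or possible to satisfy) here. */
	__vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
}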
|
@ -1185,7 +1185,8 @@ static inline int track_pfn_copy(struct vm_area_struct *vma)
|
||||
* can be for the entire vma (in which case pfn, size are zero).
|
||||
*/
|
||||
static inline void untrack_pfn(struct vm_area_struct *vma,
|
||||
unsigned long pfn, unsigned long size)
|
||||
unsigned long pfn, unsigned long size,
|
||||
bool mm_wr_locked)
|
||||
{
|
||||
}
|
||||
|
||||
@ -1203,7 +1204,7 @@ extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
|
||||
pfn_t pfn);
|
||||
extern int track_pfn_copy(struct vm_area_struct *vma);
|
||||
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
|
||||
unsigned long size);
|
||||
unsigned long size, bool mm_wr_locked);
|
||||
extern void untrack_pfn_moved(struct vm_area_struct *vma);
|
||||
#endif
|
||||
|
||||
|
mm/memory.c
@@ -1613,7 +1613,7 @@ void unmap_page_range(struct mmu_gather *tlb,
 static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr,
-		struct zap_details *details)
+		struct zap_details *details, bool mm_wr_locked)
 {
 	unsigned long start = max(vma->vm_start, start_addr);
 	unsigned long end;
@@ -1628,7 +1628,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 		uprobe_munmap(vma, start, end);
 
 	if (unlikely(vma->vm_flags & VM_PFNMAP))
-		untrack_pfn(vma, 0, 0);
+		untrack_pfn(vma, 0, 0, mm_wr_locked);
 
 	if (start != end) {
 		if (unlikely(is_vm_hugetlb_page(vma))) {
@@ -1675,7 +1675,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr)
+		unsigned long end_addr, bool mm_wr_locked)
 {
 	struct mmu_notifier_range range;
 	struct zap_details details = {
@@ -1689,7 +1689,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 				start_addr, end_addr);
 	mmu_notifier_invalidate_range_start(&range);
 	do {
-		unmap_single_vma(tlb, vma, start_addr, end_addr, &details);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
+				 mm_wr_locked);
 	} while ((vma = mas_find(&mas, end_addr - 1)) != NULL);
 	mmu_notifier_invalidate_range_end(&range);
 }
@@ -1723,7 +1724,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 	 * unmap 'address-end' not 'range.start-range.end' as range
 	 * could have been expanded for hugetlb pmd sharing.
 	 */
-	unmap_single_vma(&tlb, vma, address, end, details);
+	unmap_single_vma(&tlb, vma, address, end, details, false);
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
 }
@@ -2492,7 +2493,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
 	if (err)
-		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
+		untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
 	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
mm/memremap.c

@@ -129,7 +129,7 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
 	}
 	mem_hotplug_done();
 
-	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
+	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
 	pgmap_array_delete(range);
 }
 
@@ -276,7 +276,7 @@ err_add_memory:
 	if (!is_private)
 		kasan_remove_zero_shadow(__va(range->start), range_len(range));
 err_kasan:
-	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
+	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
 err_pfn_remap:
 	pgmap_array_delete(range);
 	return error;
mm/mmap.c
@@ -78,7 +78,7 @@ core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
 static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		struct vm_area_struct *next, unsigned long start,
-		unsigned long end);
+		unsigned long end, bool mm_wr_locked);
 
 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
 {
@@ -2133,14 +2133,14 @@ static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
 static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		struct vm_area_struct *next,
-		unsigned long start, unsigned long end)
+		unsigned long start, unsigned long end, bool mm_wr_locked)
 {
 	struct mmu_gather tlb;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, mt, vma, start, end);
+	unmap_vmas(&tlb, mt, vma, start, end, mm_wr_locked);
 	free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 		      next ? next->vm_start : USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb);
@@ -2388,7 +2388,11 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		mmap_write_downgrade(mm);
 	}
 
-	unmap_region(mm, &mt_detach, vma, prev, next, start, end);
+	/*
+	 * We can free page tables without write-locking mmap_lock because VMAs
+	 * were isolated before we downgraded mmap_lock.
+	 */
+	unmap_region(mm, &mt_detach, vma, prev, next, start, end, !downgrade);
 	/* Statistics and freeing VMAs */
 	mas_set(&mas_detach, start);
 	remove_mt(mm, &mas_detach);
@@ -2701,7 +2705,7 @@ unmap_and_free_vma:
 
 		/* Undo any partial mapping done by a device driver. */
 		unmap_region(mm, &mm->mm_mt, vma, prev, next, vma->vm_start,
-			     vma->vm_end);
+			     vma->vm_end, true);
 	}
 	if (file && (vm_flags & VM_SHARED))
 		mapping_unmap_writable(file->f_mapping);
@@ -3029,7 +3033,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu_fullmm(&tlb, mm);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX);
+	unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX, false);
 	mmap_read_unlock(mm);
 
 	/*