mm/rmap: split try_to_munlock from try_to_unmap
The behaviour of try_to_unmap_one() is difficult to follow because it performs different operations based on a fairly large set of flags used in different combinations.  TTU_MUNLOCK is one such flag.  However, it is used exclusively by try_to_munlock(), which specifies no other flags.  Therefore, rather than overload try_to_unmap_one() with unrelated behaviour, split this out into its own function and remove the flag.

Link: https://lkml.kernel.org/r/20210616105937.23201-4-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit cd62734ca6 (parent 4dd845b5a3)
Committed by Linus Torvalds

 mm/rmap.c | 68
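The shape of the refactor, reduced to a standalone sketch before the diff itself: a walk callback that multiplexes on a flag becomes a dedicated callback with straight-line control flow. This is illustrative userspace C, not kernel code; the names (walk_one_flagged, mlock_one, vm_locked, WALK_MUNLOCK) are invented stand-ins for try_to_unmap_one()'s TTU_MUNLOCK handling and the new page_mlock_one().

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the kernel's vma; only the flag we care about. */
struct vma {
	bool vm_locked;
};

enum walk_flags {
	WALK_MUNLOCK = 1 << 0,	/* plays the role of TTU_MUNLOCK */
};

/* Before: one callback whose behaviour forks on a flag. */
static bool walk_one_flagged(struct vma *vma, unsigned int flags)
{
	/* munlock-only early exit buried in the shared path */
	if ((flags & WALK_MUNLOCK) && !vma->vm_locked)
		return true;

	/* ... shared page-table walk ... */

	if (flags & WALK_MUNLOCK)
		return false;	/* munlock-only termination */

	/* ... unmap work that the munlock caller never wanted ... */
	return true;
}

/* After: munlock gets a dedicated callback and the flag disappears. */
static bool mlock_one(struct vma *vma)
{
	if (!vma->vm_locked)
		return true;	/* nothing to lock here, keep scanning */

	/* ... mlock the page ... */
	return false;		/* found a locked vma: stop the walk */
}

int main(void)
{
	struct vma v = { .vm_locked = true };

	printf("flagged callback continues walk: %d\n",
	       walk_one_flagged(&v, WALK_MUNLOCK));
	printf("dedicated callback continues walk: %d\n", mlock_one(&v));
	return 0;
}

The payoff is visible in the hunks below: the munlock-only early exits disappear from the shared unmap path, and the dedicated callback reads straight through.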
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1411,10 +1411,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	if (flags & TTU_SYNC)
 		pvmw.flags = PVMW_SYNC;
 
-	/* munlock has nothing to gain from examining un-locked vmas */
-	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
-		return true;
-
 	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
 	    is_zone_device_page(page) && !is_device_private_page(page))
 		return true;
@@ -1476,8 +1472,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
-			if (flags & TTU_MUNLOCK)
-				continue;
 		}
 
 		/* Unexpected PMD-mapped THP? */
@@ -1790,20 +1784,58 @@ void try_to_unmap(struct page *page, enum ttu_flags flags)
 	rmap_walk(page, &rwc);
 }
 
-/**
- * try_to_munlock - try to munlock a page
- * @page: the page to be munlocked
- *
- * Called from munlock code. Checks all of the VMAs mapping the page
- * to make sure nobody else has this page mlocked. The page will be
- * returned with PG_mlocked cleared if no other vmas have it mlocked.
+/*
+ * Walks the vma's mapping a page and mlocks the page if any locked vma's are
+ * found. Once one is found the page is locked and the scan can be terminated.
  */
+static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
+				 unsigned long address, void *unused)
+{
+	struct page_vma_mapped_walk pvmw = {
+		.page = page,
+		.vma = vma,
+		.address = address,
+	};
 
-void try_to_munlock(struct page *page)
+	/* An un-locked vma doesn't have any pages to lock, continue the scan */
+	if (!(vma->vm_flags & VM_LOCKED))
+		return true;
+
+	while (page_vma_mapped_walk(&pvmw)) {
+		/*
+		 * Need to recheck under the ptl to serialise with
+		 * __munlock_pagevec_fill() after VM_LOCKED is cleared in
+		 * munlock_vma_pages_range().
+		 */
+		if (vma->vm_flags & VM_LOCKED) {
+			/* PTE-mapped THP are never mlocked */
+			if (!PageTransCompound(page))
+				mlock_vma_page(page);
+			page_vma_mapped_walk_done(&pvmw);
+		}
+
+		/*
+		 * no need to continue scanning other vma's if the page has
+		 * been locked.
+		 */
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * page_mlock - try to mlock a page
+ * @page: the page to be mlocked
+ *
+ * Called from munlock code. Checks all of the VMAs mapping the page and mlocks
+ * the page if any are found. The page will be returned with PG_mlocked cleared
+ * if it is not mapped by any locked vmas.
+ */
+void page_mlock(struct page *page)
 {
 	struct rmap_walk_control rwc = {
-		.rmap_one = try_to_unmap_one,
-		.arg = (void *)TTU_MUNLOCK,
+		.rmap_one = page_mlock_one,
 		.done = page_not_mapped,
 		.anon_lock = page_lock_anon_vma_read,
 
@@ -1855,7 +1887,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the anon_vma struct it points to.
  *
- * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
+ * When called from page_mlock(), the mmap_lock of the mm containing the vma
  * where the page was found will be held for write. So, we won't recheck
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
@@ -1908,7 +1940,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the address_space struct it points to.
  *
- * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
+ * When called from page_mlock(), the mmap_lock of the mm containing the vma
  * where the page was found will be held for write. So, we won't recheck
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
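A note on why page_mlock_one() can terminate the scan by returning false: the rmap walk framework stops invoking rwc->rmap_one() as soon as it returns false, which is how the first locked vma ends the walk. Below is a simplified, runnable userspace sketch of that contract; the types and the array-based loop are invented stand-ins, since the real rmap_walk() iterates anon_vma chains and i_mmap trees.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins for the kernel types. */
struct page { int id; };
struct vm_area_struct { bool vm_locked; };

struct rmap_walk_control {
	/* Contract: return true to continue the walk, false to abort it. */
	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			 unsigned long address, void *arg);
	void *arg;
};

/* Simplified walk loop: visit each vma mapping the page until told to stop. */
static void rmap_walk_sketch(struct page *page, struct rmap_walk_control *rwc,
			     struct vm_area_struct *vmas, size_t nr)
{
	for (size_t i = 0; i < nr; i++)
		if (!rwc->rmap_one(page, &vmas[i], 0, rwc->arg))
			break;
}

/* Mirrors page_mlock_one()'s control flow: stop at the first locked vma. */
static bool mlock_one_sketch(struct page *page, struct vm_area_struct *vma,
			     unsigned long address, void *unused)
{
	(void)page; (void)address; (void)unused;

	if (!vma->vm_locked)
		return true;	/* un-locked vma, continue the scan */

	printf("locked vma found, terminating walk early\n");
	return false;
}

int main(void)
{
	struct page pg = { .id = 1 };
	struct vm_area_struct vmas[] = {
		{ .vm_locked = false },
		{ .vm_locked = true },	/* the walk stops here */
		{ .vm_locked = false },
	};
	struct rmap_walk_control rwc = { .rmap_one = mlock_one_sketch };

	rmap_walk_sketch(&pg, &rwc, vmas, 3);
	return 0;
}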