mm: Add unmap_mapping_folio()
Convert both callers of unmap_mapping_page() to call unmap_mapping_folio() instead. Also move zap_details from linux/mm.h to mm/memory.c.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
commit 3506659e18 (parent efe99bba28)
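For orientation before the diff, a minimal before/after sketch of the caller-side change, modelled on the two mm/truncate.c call sites further down. The helper example_unmap_before_free() is hypothetical and only illustrates the pattern; it is not part of the patch.

/*
 * Hypothetical helper, for illustration only; not part of this patch.
 * Shows the caller-side conversion applied in mm/truncate.c below.
 */
#include <linux/mm.h>
#include "internal.h"	/* unmap_mapping_folio() is declared here (mm-internal) */

static void example_unmap_before_free(struct folio *folio)
{
	/*
	 * Before: the per-page interface took the head page:
	 *	unmap_mapping_page(&folio->page);
	 * After: pass the locked folio directly.
	 */
	if (folio_mapped(folio))
		unmap_mapping_folio(folio);
}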
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1825,28 +1825,6 @@ static inline bool can_do_mlock(void) { return false; }
 extern int user_shm_lock(size_t, struct ucounts *);
 extern void user_shm_unlock(size_t, struct ucounts *);
 
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
-	struct address_space *zap_mapping;	/* Check page->mapping if set */
-	struct page *single_page;		/* Locked page to be unmapped */
-};
-
-/*
- * We set details->zap_mapping when we want to unmap shared but keep private
- * pages. Return true if skip zapping this page, false otherwise.
- */
-static inline bool
-zap_skip_check_mapping(struct zap_details *details, struct page *page)
-{
-	if (!details || !page)
-		return false;
-
-	return details->zap_mapping &&
-	    (details->zap_mapping != page_rmapping(page));
-}
-
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -1892,7 +1870,6 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 extern int fixup_user_fault(struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
-void unmap_mapping_page(struct page *page);
 void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows);
 void unmap_mapping_range(struct address_space *mapping,
@@ -1913,7 +1890,6 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
 	BUG();
 	return -EFAULT;
 }
-static inline void unmap_mapping_page(struct page *page) { }
 static inline void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows) { }
 static inline void unmap_mapping_range(struct address_space *mapping,
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -74,6 +74,7 @@ static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
 	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
 }
 
+struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
@@ -388,6 +389,7 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
 
 #ifdef CONFIG_MMU
+void unmap_mapping_folio(struct folio *folio);
 extern long populate_vma_page_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *locked);
 extern long faultin_vma_page_range(struct vm_area_struct *vma,
@@ -491,8 +493,8 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 	}
 	return fpin;
 }
-
 #else /* !CONFIG_MMU */
+static inline void unmap_mapping_folio(struct folio *folio) { }
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1304,6 +1304,28 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 	return ret;
 }
 
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+	struct address_space *zap_mapping;	/* Check page->mapping if set */
+	struct folio *single_folio;		/* Locked folio to be unmapped */
+};
+
+/*
+ * We set details->zap_mapping when we want to unmap shared but keep private
+ * pages. Return true if skip zapping this page, false otherwise.
+ */
+static inline bool
+zap_skip_check_mapping(struct zap_details *details, struct page *page)
+{
+	if (!details || !page)
+		return false;
+
+	return details->zap_mapping &&
+	    (details->zap_mapping != page_rmapping(page));
+}
+
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end,
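As a side note on how the moved helper is consumed: below is a simplified, hedged sketch of a zap loop that uses zap_skip_check_mapping(). The actual call site in zap_pte_range() is not part of this hunk, and the loop here omits swap entries, rmap accounting and TLB batching.

/*
 * Illustrative consumer of zap_skip_check_mapping(); not code from this
 * patch. The real zap_pte_range() does much more per pte.
 */
static void example_zap_loop(struct vm_area_struct *vma, pte_t *pte,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details)
{
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		struct page *page = vm_normal_page(vma, addr, *pte);

		/* Shared-only zap: skip pages belonging to other mappings. */
		if (zap_skip_check_mapping(details, page))
			continue;

		/* ... clear the pte and unmap 'page' here ... */
	}
}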
@@ -1443,8 +1465,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
 			/* fall through */
-		} else if (details && details->single_page &&
-			   PageTransCompound(details->single_page) &&
+		} else if (details && details->single_folio &&
+			   folio_test_pmd_mappable(details->single_folio) &&
 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
 			/*
@@ -3332,31 +3354,30 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 }
 
 /**
- * unmap_mapping_page() - Unmap single page from processes.
- * @page: The locked page to be unmapped.
+ * unmap_mapping_folio() - Unmap single folio from processes.
+ * @folio: The locked folio to be unmapped.
  *
- * Unmap this page from any userspace process which still has it mmaped.
+ * Unmap this folio from any userspace process which still has it mmaped.
  * Typically, for efficiency, the range of nearby pages has already been
  * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
- * truncation or invalidation holds the lock on a page, it may find that
- * the page has been remapped again: and then uses unmap_mapping_page()
+ * truncation or invalidation holds the lock on a folio, it may find that
+ * the page has been remapped again: and then uses unmap_mapping_folio()
  * to unmap it finally.
  */
-void unmap_mapping_page(struct page *page)
+void unmap_mapping_folio(struct folio *folio)
 {
-	struct address_space *mapping = page->mapping;
+	struct address_space *mapping = folio->mapping;
 	struct zap_details details = { };
 	pgoff_t first_index;
 	pgoff_t last_index;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON(!folio_test_locked(folio));
 
-	first_index = page->index;
-	last_index = page->index + thp_nr_pages(page) - 1;
+	first_index = folio->index;
+	last_index = folio->index + folio_nr_pages(folio) - 1;
 
 	details.zap_mapping = mapping;
-	details.single_page = page;
+	details.single_folio = folio;
 
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
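The kerneldoc above describes the intended calling pattern; here is a hedged sketch of it. example_truncate_one() is a hypothetical helper, not code from this patch; the real callers follow in mm/truncate.c below.

/*
 * Hypothetical illustration of the pattern described in the kerneldoc
 * above; not part of this patch. Assumes the folio is already locked.
 */
static void example_truncate_one(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	/*
	 * The bulk of the range was typically unmapped already via
	 * unmap_mapping_pages() or unmap_mapping_range(); by the time we
	 * hold this folio's lock it may have been faulted back in, so
	 * unmap just this folio before it is removed from the page cache.
	 */
	if (folio_mapped(folio))
		unmap_mapping_folio(folio);
}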
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -180,7 +180,7 @@ void do_invalidatepage(struct page *page, unsigned int offset,
 static void truncate_cleanup_folio(struct folio *folio)
 {
 	if (folio_mapped(folio))
-		unmap_mapping_page(&folio->page);
+		unmap_mapping_folio(folio);
 
 	if (folio_has_private(folio))
 		do_invalidatepage(&folio->page, 0, folio_size(folio));
@@ -670,7 +670,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 			wait_on_page_writeback(page);
 
 			if (page_mapped(page))
-				unmap_mapping_page(page);
+				unmap_mapping_folio(page_folio(page));
 			BUG_ON(page_mapped(page));
 
 			ret2 = do_launder_page(mapping, page);