mm: convert ksm_might_need_to_copy() to work on folios
Patch series "Finish two folio conversions".

Most callers of page_add_new_anon_rmap() and
lru_cache_add_inactive_or_unevictable() have been converted to their
folio equivalents, but there are still a few stragglers.  There's a
bit of preparatory work in ksm and unuse_pte(), but after that it's
pretty mechanical.

This patch (of 9):

Accept a folio as an argument and return a folio result.  Removes a
call to compound_head() in do_swap_page(), and prevents folio & page
from getting out of sync in unuse_pte().

Reviewed-by: David Hildenbrand <david@redhat.com>
[willy@infradead.org: fix smatch warning]
  Link: https://lkml.kernel.org/r/ZXnPtblC6A1IkyAB@casper.infradead.org
[david@redhat.com: only adjust the page if the folio changed]
  Link: https://lkml.kernel.org/r/6a8f2110-fa91-4c10-9eae-88315309a6e3@redhat.com
Link: https://lkml.kernel.org/r/20231211162214.2146080-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20231211162214.2146080-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent: a2bf6a9ca8
commit: 96db66d9c8
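The conversion gives ksm_might_need_to_copy() a single folio return that encodes four outcomes callers must tell apart: NULL when allocating the private copy fails, ERR_PTR(-EHWPOISON) when the source page is hardware-poisoned, the original folio when no copy is needed, or a freshly allocated copy. A minimal userspace sketch of that contract follows; might_need_to_copy() and its flag arguments are hypothetical stand-ins, and ERR_PTR() is re-declared here only for illustration (the kernel's lives in include/linux/err.h).

#include <stdio.h>

#define EHWPOISON	133	/* illustration; matches the kernel errno value */

struct folio { int id; };

/* err.h-style encoding: a small negative errno stored in the pointer */
static inline void *ERR_PTR(long error) { return (void *)error; }

/* hypothetical stand-in for the converted ksm_might_need_to_copy() */
static struct folio *might_need_to_copy(struct folio *folio, int oom, int poison)
{
	if (oom)
		return NULL;			/* allocation of the copy failed */
	if (poison)
		return ERR_PTR(-EHWPOISON);	/* source page poisoned */
	return folio;				/* original, or a fresh private copy */
}

int main(void)
{
	struct folio f = { .id = 1 };
	struct folio *folio = might_need_to_copy(&f, 0, 0);

	if (!folio)
		puts("-ENOMEM");
	else if (folio == ERR_PTR(-EHWPOISON))
		puts("-EHWPOISON");
	else
		printf("folio %d usable\n", folio->id);
	return 0;
}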
include/linux/ksm.h

@@ -76,7 +76,7 @@ static inline void ksm_exit(struct mm_struct *mm)
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_might_need_to_copy(struct page *page,
+struct folio *ksm_might_need_to_copy(struct folio *folio,
 			struct vm_area_struct *vma, unsigned long addr);
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
@@ -129,10 +129,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	return page;
+	return folio;
 }
 
 static inline void rmap_walk_ksm(struct folio *folio,
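A side effect of the CONFIG_KSM=n stub above being an identity function: callers such as do_swap_page() and unuse_pte() need no #ifdef, and the call compiles away entirely when KSM is disabled. A minimal sketch of the pattern, simplified to one argument (not the kernel header itself):

#include <stdio.h>

struct folio { int id; };

#ifdef CONFIG_KSM
struct folio *ksm_might_need_to_copy(struct folio *folio);	/* real implementation */
#else
static inline struct folio *ksm_might_need_to_copy(struct folio *folio)
{
	return folio;	/* KSM compiled out: never a copy, never an error */
}
#endif

int main(void)
{
	struct folio f = { .id = 42 };

	/* no #ifdef at the call site; with CONFIG_KSM unset this folds away */
	printf("%d\n", ksm_might_need_to_copy(&f)->id);
	return 0;
}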
mm/ksm.c | 21

@@ -2873,30 +2873,30 @@ void __ksm_exit(struct mm_struct *mm)
 	trace_ksm_exit(mm);
 }
 
-struct page *ksm_might_need_to_copy(struct page *page,
+struct folio *ksm_might_need_to_copy(struct folio *folio,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct folio *folio = page_folio(page);
+	struct page *page = folio_page(folio, 0);
 	struct anon_vma *anon_vma = folio_anon_vma(folio);
 	struct folio *new_folio;
 
 	if (folio_test_large(folio))
-		return page;
+		return folio;
 
 	if (folio_test_ksm(folio)) {
 		if (folio_stable_node(folio) &&
 		    !(ksm_run & KSM_RUN_UNMERGE))
-			return page;	/* no need to copy it */
+			return folio;	/* no need to copy it */
 	} else if (!anon_vma) {
-		return page;		/* no need to copy it */
+		return folio;		/* no need to copy it */
 	} else if (folio->index == linear_page_index(vma, addr) &&
 			anon_vma->root == vma->anon_vma->root) {
-		return page;		/* still no need to copy it */
+		return folio;		/* still no need to copy it */
 	}
 	if (PageHWPoison(page))
 		return ERR_PTR(-EHWPOISON);
 	if (!folio_test_uptodate(folio))
-		return page;		/* let do_swap_page report the error */
+		return folio;		/* let do_swap_page report the error */
 
 	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 	if (new_folio &&
@@ -2905,9 +2905,10 @@ struct page *ksm_might_need_to_copy(struct page *page,
 		new_folio = NULL;
 	}
 	if (new_folio) {
-		if (copy_mc_user_highpage(&new_folio->page, page, addr, vma)) {
+		if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
+								addr, vma)) {
 			folio_put(new_folio);
-			memory_failure_queue(page_to_pfn(page), 0);
+			memory_failure_queue(folio_pfn(folio), 0);
 			return ERR_PTR(-EHWPOISON);
 		}
 		folio_set_dirty(new_folio);
@@ -2918,7 +2919,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 #endif
 	}
 
-	return new_folio ? &new_folio->page : NULL;
+	return new_folio;
 }
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
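A gloss on the error path in the second hunk: copy_mc_user_highpage() is the machine-check-safe copy variant, and a nonzero return means the source page turned out to be poisoned mid-copy. The excerpt below repeats that branch with explanatory comments added (the comments are editorial, not part of the patch):

	if (new_folio) {
		if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
								addr, vma)) {
			/* machine check hit the source page mid-copy */
			folio_put(new_folio);		/* discard the partial copy */
			/* schedule recovery for the poisoned pfn */
			memory_failure_queue(folio_pfn(folio), 0);
			return ERR_PTR(-EHWPOISON);	/* caller maps nothing */
		}
		folio_set_dirty(new_folio);
	}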
mm/memory.c | 11

@@ -3942,15 +3942,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		 * page->index of !PageKSM() pages would be nonlinear inside the
 		 * anon VMA -- PageKSM() is lost on actual swapout.
 		 */
-		page = ksm_might_need_to_copy(page, vma, vmf->address);
-		if (unlikely(!page)) {
+		folio = ksm_might_need_to_copy(folio, vma, vmf->address);
+		if (unlikely(!folio)) {
 			ret = VM_FAULT_OOM;
+			folio = swapcache;
 			goto out_page;
-		} else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+		} else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
 			ret = VM_FAULT_HWPOISON;
+			folio = swapcache;
 			goto out_page;
 		}
-		folio = page_folio(page);
+		if (folio != swapcache)
+			page = folio_page(folio, 0);
 
 		/*
 		 * If we want to map a page that's in the swapcache writable, we
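Two of the changelog fixups land in this hunk. David Hildenbrand's "[only adjust the page if the folio changed]" is the `if (folio != swapcache)` guard: page is rewritten only when ksm_might_need_to_copy() actually returned a private copy. The smatch fixup is plausibly the error check itself, comparing `folio == ERR_PTR(-EHWPOISON)` instead of calling PTR_ERR() on a pointer that is usually not an error value. The two forms are equivalent for error-encoded pointers, as this userspace check demonstrates (ERR_PTR()/PTR_ERR() re-declared here only for illustration):

#include <assert.h>

#define EHWPOISON	133	/* illustration; matches the kernel errno value */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

int main(void)
{
	void *folio = ERR_PTR(-EHWPOISON);

	assert(folio == ERR_PTR(-EHWPOISON));	/* new form: pointer comparison */
	assert(PTR_ERR(folio) == -EHWPOISON);	/* old form: decode, then compare */
	return 0;
}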
mm/swapfile.c

@@ -1749,11 +1749,13 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	int ret = 1;
 
 	swapcache = page;
-	page = ksm_might_need_to_copy(page, vma, addr);
-	if (unlikely(!page))
+	folio = ksm_might_need_to_copy(folio, vma, addr);
+	if (unlikely(!folio))
 		return -ENOMEM;
-	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+	else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
 		hwpoisoned = true;
+	else
+		page = folio_file_page(folio, swp_offset(entry));
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
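Unlike do_swap_page(), unuse_pte() does not bail out on poison: it records `hwpoisoned = true` and still proceeds to take the PTE lock, so the stale swap entry can be handled there. The new `else` arm recomputes page through folio_file_page() only when a valid folio came back, which is exactly the "prevents folio & page from getting out of sync" point from the changelog. Below is a simplified userspace model of folio_file_page(); like the kernel version, it masks the index by the folio's page count, so for the order-0 folios ksm_might_need_to_copy() can return it always resolves to the folio's only page.

#include <stdio.h>

struct page { int nr; };
struct folio { unsigned int order; struct page pages[4]; };

/* pick the subpage of a folio that backs a given file/swap offset */
static struct page *folio_file_page(struct folio *folio, unsigned long index)
{
	unsigned long nr_pages = 1UL << folio->order;

	return &folio->pages[index & (nr_pages - 1)];
}

int main(void)
{
	struct folio f = { .order = 0,
			   .pages = { {0}, {1}, {2}, {3} } };

	/* order-0 folio: any offset resolves to subpage 0 */
	printf("%d\n", folio_file_page(&f, 12345)->nr);
	return 0;
}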