hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()

hugetlb_no_page() and hugetlb_wp() call anon_vma_prepare().  In
preparation for hugetlb to safely handle faults under the VMA lock, use
vmf_anon_prepare() here instead.

Additionally, passing the vm_fault struct from hugetlb_fault() down to
hugetlb_wp() works toward cleaning up the hugetlb code and shrinking
its function call stack.
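
For context: unlike anon_vma_prepare(), which returns an errno-style
int that callers map to VM_FAULT_OOM themselves, vmf_anon_prepare()
returns a vm_fault_t directly and can return VM_FAULT_RETRY when the
fault is being handled under the VMA lock, where the anon_vma cannot
be safely set up.  A rough sketch of its semantics (the real
implementation lives in mm/memory.c; the _sketch name below is
illustrative only):

	/* Sketch only: approximates vmf_anon_prepare() behaviour. */
	static vm_fault_t vmf_anon_prepare_sketch(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;

		if (likely(vma->anon_vma))
			return 0;
		if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
			/* Setting up the anon_vma needs the mmap lock. */
			vma_end_read(vma);
			return VM_FAULT_RETRY;
		}
		if (unlikely(__anon_vma_prepare(vma)))
			return VM_FAULT_OOM;
		return 0;
	}

This is why both callers below switch from open-coding the
VM_FAULT_OOM fallback to simply propagating the return value.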

Link: https://lkml.kernel.org/r/20240221234732.187629-5-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5851,7 +5851,8 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *ptep, unsigned int flags,
-		struct folio *pagecache_folio, spinlock_t *ptl)
+		struct folio *pagecache_folio, spinlock_t *ptl,
+		struct vm_fault *vmf)
 {
 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
 	pte_t pte = huge_ptep_get(ptep);
@@ -5985,10 +5986,9 @@ retry_avoidcopy:
 	 * When the original hugepage is shared one, it does not have
 	 * anon_vma prepared.
 	 */
-	if (unlikely(anon_vma_prepare(vma))) {
-		ret = VM_FAULT_OOM;
+	ret = vmf_anon_prepare(vmf);
+	if (unlikely(ret))
 		goto out_release_all;
-	}
 
 	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
 		ret = VM_FAULT_HWPOISON_LARGE;
@@ -6228,10 +6228,10 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			new_pagecache_folio = true;
 		} else {
 			folio_lock(folio);
-			if (unlikely(anon_vma_prepare(vma))) {
-				ret = VM_FAULT_OOM;
+
+			ret = vmf_anon_prepare(vmf);
+			if (unlikely(ret))
 				goto backout_unlocked;
-			}
 			anon_rmap = 1;
 		}
 	} else {
@@ -6298,7 +6298,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
+		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl, vmf);
 	}
 
 	spin_unlock(ptl);
@@ -6521,7 +6521,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
 		if (!huge_pte_write(entry)) {
 			ret = hugetlb_wp(mm, vma, address, ptep, flags,
-					 pagecache_folio, ptl);
+					 pagecache_folio, ptl, &vmf);
 			goto out_put_page;
 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
 			entry = huge_pte_mkdirty(entry);