mm: change vmf_anon_prepare() to __vmf_anon_prepare()
Some callers of vmf_anon_prepare() may not want us to release the per-VMA
lock ourselves. Rename vmf_anon_prepare() to __vmf_anon_prepare() and let
the callers drop the lock when desired.
Also, make vmf_anon_prepare() a wrapper that releases the per-VMA lock
itself for any callers that don't care.
This is in preparation to fix this bug reported by syzbot:
https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
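For illustration, a caller that needs vmf->vma to remain valid after a failed preparation might use the new entry point as in the hypothetical sketch below (example_fault_path() is a placeholder, not code from this commit; __vmf_anon_prepare(), vma_end_read(), and VM_FAULT_RETRY are the real symbols involved):

static vm_fault_t example_fault_path(struct vm_fault *vmf)
{
	vm_fault_t ret = __vmf_anon_prepare(vmf);

	if (unlikely(ret & VM_FAULT_RETRY)) {
		/*
		 * Unlike with vmf_anon_prepare(), the per-VMA lock is
		 * still held here, so vmf->vma is safe to touch until
		 * we release it ourselves.
		 */
		/* ... caller-side cleanup that needs vmf->vma ... */
		vma_end_read(vmf->vma);
	}
	return ret;
}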
Link: https://lkml.kernel.org/r/20240914194243.245-1-vishal.moola@gmail.com
Fixes: 9acad7ba3e ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: syzbot+2dab93857ee95f2eeb08@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 2a058ab328 (parent b4afe4183e)
@@ -310,7 +310,16 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 	wake_up(wqh);
 }
 
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
+static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+{
+	vm_fault_t ret = __vmf_anon_prepare(vmf);
+
+	if (unlikely(ret & VM_FAULT_RETRY))
+		vma_end_read(vmf->vma);
+	return ret;
+}
+
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
 bool __folio_end_writeback(struct folio *folio);
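The inline wrapper above preserves the old contract: by the time VM_FAULT_RETRY reaches a caller of vmf_anon_prepare(), the per-VMA lock has already been released, so existing call sites need no changes. A minimal usage sketch (hypothetical caller; example_anonymous_fault() is a placeholder name):

static vm_fault_t example_anonymous_fault(struct vm_fault *vmf)
{
	vm_fault_t ret = vmf_anon_prepare(vmf);

	if (ret)
		return ret;	/* on VM_FAULT_RETRY the lock was dropped for us */

	/* ... allocate and map the anonymous folio ... */
	return 0;
}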
@@ -3259,7 +3259,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
 }
 
 /**
- * vmf_anon_prepare - Prepare to handle an anonymous fault.
+ * __vmf_anon_prepare - Prepare to handle an anonymous fault.
  * @vmf: The vm_fault descriptor passed from the fault handler.
  *
  * When preparing to insert an anonymous page into a VMA from a
@@ -3273,7 +3273,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
  * Return: 0 if fault handling can proceed. Any other value should be
  * returned to the caller.
  */
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret = 0;
@@ -3281,10 +3281,8 @@ vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
 	if (likely(vma->anon_vma))
 		return 0;
 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-		if (!mmap_read_trylock(vma->vm_mm)) {
-			vma_end_read(vma);
+		if (!mmap_read_trylock(vma->vm_mm))
 			return VM_FAULT_RETRY;
-		}
 	}
 	if (__anon_vma_prepare(vma))
 		ret = VM_FAULT_OOM;
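Note that the behavioral change is confined to the FAULT_FLAG_VMA_LOCK path: when mmap_read_trylock() fails, __vmf_anon_prepare() now returns VM_FAULT_RETRY with the per-VMA lock still held instead of dropping it itself. Releasing the lock becomes the job of the vmf_anon_prepare() wrapper, or of direct callers, as sketched above.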