mm: provide mm_struct and address to huge_ptep_get()
On powerpc 8xx, huge_ptep_get() will need to know whether the given ptep is a PTE entry or a PMD entry. This cannot be deduced from the entry itself, because there is no easy way to tell from its content. So huge_ptep_get() will need either the size of the page or the pmd. In order to be consistent with huge_ptep_get_and_clear(), give mm and address to huge_ptep_get().

Link: https://lkml.kernel.org/r/cc00c70dd384298796a4e1b25d6c4eb306d3af85.1719928057.git.christophe.leroy@csgroup.eu
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit e6c0c03245 (parent 18d095b255)
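The interface change is mechanical: every implementation and caller of huge_ptep_get() gains an mm and an address argument, mirroring huge_ptep_get_and_clear(). As a rough sketch of the asm-generic fallback touched below (the generic version simply ignores the new arguments; they only matter to architectures, such as powerpc 8xx, that need the mapping size to tell a PTE entry from a PMD entry):

/* Before: only the entry pointer was available. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return ptep_get(ptep);
}

/* After: mm and address are threaded through, matching huge_ptep_get_and_clear().
 * The generic fallback ignores them; callers pass whatever mm/address pair they
 * already hold, e.g. (vma->vm_mm, addr, ptep) or (walk->mm, addr, pte). */
static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        return ptep_get(ptep);
}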
@@ -13,12 +13,12 @@
 
 /*
  * If our huge pte is non-zero then mark the valid bit.
- * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * This allows pte_present(huge_ptep_get(mm,addr,ptep)) to return true for non-zero
  * ptes.
  * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
  */
 #define __HAVE_ARCH_HUGE_PTEP_GET
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         pte_t retval = *ptep;
         if (pte_val(retval))

@@ -46,7 +46,7 @@ extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, unsigned long sz);
 #define __HAVE_ARCH_HUGE_PTEP_GET
-extern pte_t huge_ptep_get(pte_t *ptep);
+extern pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 
 void __init arm64_hugetlb_cma_reserve(void);
 

@@ -127,7 +127,7 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
         return contig_ptes;
 }
 
-pte_t huge_ptep_get(pte_t *ptep)
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         int ncontig, i;
         size_t pgsize;

@@ -44,7 +44,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                pte_t pte, int dirty);
 
 #define __HAVE_ARCH_HUGE_PTEP_GET
-pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 
 pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
 #define arch_make_huge_pte arch_make_huge_pte

@@ -3,7 +3,7 @@
 #include <linux/err.h>
 
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
-pte_t huge_ptep_get(pte_t *ptep)
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         unsigned long pte_num;
         int i;

@@ -19,7 +19,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep, pte_t pte, unsigned long sz);
 void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                        pte_t *ptep, pte_t pte);
-pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                               unsigned long addr, pte_t *ptep);
 

@@ -64,7 +64,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                              unsigned long addr, pte_t *ptep,
                                              pte_t pte, int dirty)
 {
-        int changed = !pte_same(huge_ptep_get(ptep), pte);
+        int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
         if (changed) {
                 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
                 __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);

@@ -169,7 +169,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
         __set_huge_pte_at(mm, addr, ptep, pte);
 }
 
-pte_t huge_ptep_get(pte_t *ptep)
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         return __rste_to_pte(pte_val(*ptep));
 }

@@ -177,7 +177,7 @@ pte_t huge_ptep_get(pte_t *ptep)
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                               unsigned long addr, pte_t *ptep)
 {
-        pte_t pte = huge_ptep_get(ptep);
+        pte_t pte = huge_ptep_get(mm, addr, ptep);
         pmd_t *pmdp = (pmd_t *) ptep;
         pud_t *pudp = (pud_t *) ptep;
 

@@ -422,7 +422,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
         if (!ptep)
                 return false;
 
-        pte = huge_ptep_get(ptep);
+        pte = huge_ptep_get(vma->vm_mm, addr, ptep);
         if (huge_pte_none(pte) || !pte_present(pte))
                 return false;
 

@@ -1013,7 +1013,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 {
         struct mem_size_stats *mss = walk->private;
         struct vm_area_struct *vma = walk->vma;
-        pte_t ptent = huge_ptep_get(pte);
+        pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
         struct folio *folio = NULL;
         bool present = false;
 

@@ -1878,7 +1878,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
         if (vma->vm_flags & VM_SOFTDIRTY)
                 flags |= PM_SOFT_DIRTY;
 
-        pte = huge_ptep_get(ptep);
+        pte = huge_ptep_get(walk->mm, addr, ptep);
         if (pte_present(pte)) {
                 struct folio *folio = page_folio(pte_page(pte));
 

@@ -2567,7 +2567,7 @@ static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
         if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
                 /* Go the short route when not write-protecting pages. */
 
-                pte = huge_ptep_get(ptep);
+                pte = huge_ptep_get(walk->mm, start, ptep);
                 categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
 
                 if (!pagemap_scan_is_interesting_page(categories, p))

@@ -2579,7 +2579,7 @@ static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
         i_mmap_lock_write(vma->vm_file->f_mapping);
         ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
 
-        pte = huge_ptep_get(ptep);
+        pte = huge_ptep_get(walk->mm, start, ptep);
         categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
 
         if (!pagemap_scan_is_interesting_page(categories, p))

@@ -2975,7 +2975,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                 unsigned long addr, unsigned long end, struct mm_walk *walk)
 {
-        pte_t huge_pte = huge_ptep_get(pte);
+        pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
         struct numa_maps *md;
         struct page *page;
 

@@ -257,7 +257,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
                 goto out;
 
         ret = false;
-        pte = huge_ptep_get(ptep);
+        pte = huge_ptep_get(vma->vm_mm, vmf->address, ptep);
 
         /*
          * Lockless access: we're in a wait_event so it's ok if it

@@ -144,7 +144,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 #endif
 
 #ifndef __HAVE_ARCH_HUGE_PTEP_GET
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         return ptep_get(ptep);
 }

@@ -334,7 +334,7 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
 
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                  unsigned long address);
-extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
 #else /* CONFIG_MIGRATION */
 static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
 {

@@ -359,7 +359,7 @@ static inline int is_migration_entry(swp_entry_t swp)
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                         unsigned long address) { }
 static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
-                                        pte_t *pte) { }
+                                        unsigned long addr, pte_t *pte) { }
 static inline int is_writable_migration_entry(swp_entry_t entry)
 {
         return 0;

@@ -339,7 +339,7 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
                 struct vm_area_struct *vma, unsigned long addr)
 {
         bool referenced = false;
-        pte_t entry = huge_ptep_get(pte);
+        pte_t entry = huge_ptep_get(mm, addr, pte);
         struct folio *folio = pfn_folio(pte_pfn(entry));
         unsigned long psize = huge_page_size(hstate_vma(vma));
 

@@ -373,7 +373,7 @@ static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
         pte_t entry;
 
         ptl = huge_pte_lock(h, walk->mm, pte);
-        entry = huge_ptep_get(pte);
+        entry = huge_ptep_get(walk->mm, addr, pte);
         if (!pte_present(entry))
                 goto out;
 

@@ -509,7 +509,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
         pte_t entry;
 
         ptl = huge_pte_lock(h, walk->mm, pte);
-        entry = huge_ptep_get(pte);
+        entry = huge_ptep_get(walk->mm, addr, pte);
         if (!pte_present(entry))
                 goto out;
 
mm/gup.c (2 changed lines)

@@ -604,7 +604,7 @@ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz
         if (pte_end < end)
                 end = pte_end;
 
-        pte = huge_ptep_get(ptep);
+        pte = huge_ptep_get(vma->vm_mm, addr, ptep);
 
         if (!pte_access_permitted(pte, flags & FOLL_WRITE))
                 return 0;
mm/hmm.c (2 changed lines)

@@ -480,7 +480,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
         pte_t entry;
 
         ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
-        entry = huge_ptep_get(pte);
+        entry = huge_ptep_get(walk->mm, addr, pte);
 
         i = (start - range->start) >> PAGE_SHIFT;
         pfn_req_flags = range->hmm_pfns[i];
mm/hugetlb.c (44 changed lines)

@@ -5287,7 +5287,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 {
         pte_t entry;
 
-        entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
+        entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep)));
         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                 update_mmu_cache(vma, address, ptep);
 }

@@ -5395,7 +5395,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
                 src_ptl = huge_pte_lockptr(h, src, src_pte);
                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-                entry = huge_ptep_get(src_pte);
+                entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
 again:
                 if (huge_pte_none(entry)) {
                         /*

@@ -5433,7 +5433,7 @@ again:
                         set_huge_pte_at(dst, addr, dst_pte,
                                         make_pte_marker(marker), sz);
                 } else {
-                        entry = huge_ptep_get(src_pte);
+                        entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
                         pte_folio = page_folio(pte_page(entry));
                         folio_get(pte_folio);
 

@@ -5474,7 +5474,7 @@ again:
                         dst_ptl = huge_pte_lock(h, dst, dst_pte);
                         src_ptl = huge_pte_lockptr(h, src, src_pte);
                         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-                        entry = huge_ptep_get(src_pte);
+                        entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
                         if (!pte_same(src_pte_old, entry)) {
                                 restore_reserve_on_error(h, dst_vma, addr,
                                                          new_folio);

@@ -5584,7 +5584,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
                         new_addr |= last_addr_mask;
                         continue;
                 }
-                if (huge_pte_none(huge_ptep_get(src_pte)))
+                if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
                         continue;
 
                 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {

@@ -5657,7 +5657,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                         continue;
                 }
 
-                pte = huge_ptep_get(ptep);
+                pte = huge_ptep_get(mm, address, ptep);
                 if (huge_pte_none(pte)) {
                         spin_unlock(ptl);
                         continue;

@@ -5906,7 +5906,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
         struct vm_area_struct *vma = vmf->vma;
         struct mm_struct *mm = vma->vm_mm;
         const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
-        pte_t pte = huge_ptep_get(vmf->pte);
+        pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);
         struct hstate *h = hstate_vma(vma);
         struct folio *old_folio;
         struct folio *new_folio;

@@ -6027,7 +6027,7 @@ retry_avoidcopy:
                 vmf->pte = hugetlb_walk(vma, vmf->address,
                                 huge_page_size(h));
                 if (likely(vmf->pte &&
-                           pte_same(huge_ptep_get(vmf->pte), pte)))
+                           pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
                         goto retry_avoidcopy;
                 /*
                  * race occurs while re-acquiring page table

@@ -6065,7 +6065,7 @@ retry_avoidcopy:
          */
         spin_lock(vmf->ptl);
         vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
-        if (likely(vmf->pte && pte_same(huge_ptep_get(vmf->pte), pte))) {
+        if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
                 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
 
                 /* Break COW or unshare */

@@ -6166,14 +6166,14 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
  * Recheck pte with pgtable lock. Returns true if pte didn't change, or
  * false if pte changed or is changing.
  */
-static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
+static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t old_pte)
 {
         spinlock_t *ptl;
         bool same;
 
         ptl = huge_pte_lock(h, mm, ptep);
-        same = pte_same(huge_ptep_get(ptep), old_pte);
+        same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);
         spin_unlock(ptl);
 
         return same;

@@ -6234,7 +6234,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                          * never happen on the page after UFFDIO_COPY has
                          * correctly installed the page and returned.
                          */
-                        if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
+                        if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
                                 ret = 0;
                                 goto out;
                         }

@@ -6263,7 +6263,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                          * here. Before returning error, get ptl and make
                          * sure there really is no pte entry.
                          */
-                        if (hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte))
+                        if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
                                 ret = vmf_error(PTR_ERR(folio));
                         else
                                 ret = 0;

@@ -6312,7 +6312,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                         folio_unlock(folio);
                         folio_put(folio);
                         /* See comment in userfaultfd_missing() block above */
-                        if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
+                        if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
                                 ret = 0;
                                 goto out;
                         }

@@ -6339,7 +6339,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
         vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
         ret = 0;
         /* If pte changed from under us, retry */
-        if (!pte_same(huge_ptep_get(vmf->pte), vmf->orig_pte))
+        if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
                 goto backout;
 
         if (anon_rmap)

@@ -6460,7 +6460,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 return VM_FAULT_OOM;
         }
 
-        vmf.orig_pte = huge_ptep_get(vmf.pte);
+        vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
         if (huge_pte_none_mostly(vmf.orig_pte)) {
                 if (is_pte_marker(vmf.orig_pte)) {
                         pte_marker marker =

@@ -6501,7 +6501,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                          * be released there.
                          */
                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-                        migration_entry_wait_huge(vma, vmf.pte);
+                        migration_entry_wait_huge(vma, vmf.address, vmf.pte);
                         return 0;
                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte)))
                         ret = VM_FAULT_HWPOISON_LARGE |

@@ -6534,11 +6534,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
 
         /* Check for a racing update before calling hugetlb_wp() */
-        if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(vmf.pte))))
+        if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
                 goto out_ptl;
 
         /* Handle userfault-wp first, before trying to lock more pages */
-        if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(vmf.pte)) &&
+        if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
             (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
                 if (!userfaultfd_wp_async(vma)) {
                         spin_unlock(vmf.ptl);

@@ -6666,7 +6666,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                 ptl = huge_pte_lock(h, dst_mm, dst_pte);
 
                 /* Don't overwrite any existing PTEs (even markers) */
-                if (!huge_pte_none(huge_ptep_get(dst_pte))) {
+                if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
                         spin_unlock(ptl);
                         return -EEXIST;
                 }

@@ -6802,7 +6802,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
          * page backing it, then access the page.
          */
         ret = -EEXIST;
-        if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
+        if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte)))
                 goto out_release_unlock;
 
         if (folio_in_pagecache)

@@ -6923,7 +6923,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
                         address |= last_addr_mask;
                         continue;
                 }
-                pte = huge_ptep_get(ptep);
+                pte = huge_ptep_get(mm, address, ptep);
                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
                         /* Nothing to do. */
                 } else if (unlikely(is_hugetlb_entry_migration(pte))) {
@@ -835,7 +835,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
                             struct mm_walk *walk)
 {
         struct hwpoison_walk *hwp = walk->private;
-        pte_t pte = huge_ptep_get(ptep);
+        pte_t pte = huge_ptep_get(walk->mm, addr, ptep);
         struct hstate *h = hstate_vma(walk->vma);
 
         return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),

@@ -624,7 +624,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
         pte_t entry;
 
         ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
-        entry = huge_ptep_get(pte);
+        entry = huge_ptep_get(walk->mm, addr, pte);
         if (!pte_present(entry)) {
                 if (unlikely(is_hugetlb_entry_migration(entry)))
                         qp->nr_failed++;

@@ -338,14 +338,14 @@ out:
  *
  * This function will release the vma lock before returning.
  */
-void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
+void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
         pte_t pte;
 
         hugetlb_vma_assert_locked(vma);
         spin_lock(ptl);
-        pte = huge_ptep_get(ptep);
+        pte = huge_ptep_get(vma->vm_mm, addr, ptep);
 
         if (unlikely(!is_hugetlb_entry_migration(pte))) {
                 spin_unlock(ptl);

@@ -33,7 +33,7 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
          * Hugepages under user process are always in RAM and never
          * swapped out, but theoretically it needs to be checked.
          */
-        present = pte && !huge_pte_none_mostly(huge_ptep_get(pte));
+        present = pte && !huge_pte_none_mostly(huge_ptep_get(walk->mm, addr, pte));
         for (; addr != end; vec++, addr += PAGE_SIZE)
                 *vec = present;
         walk->private = vec;

@@ -587,7 +587,7 @@ retry:
         }
 
         if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
-            !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
+            !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
                 err = -EEXIST;
                 hugetlb_vma_unlock_read(dst_vma);
                 mutex_unlock(&hugetlb_fault_mutex_table[hash]);