hugetlb: restrict hugepage_migration_support() to x86_64
Currently hugepage migration is available for all archs which support
pmd-level hugepage, but testing is done only for x86_64 and there are
bugs on other archs. So, to avoid breaking such archs, this patch limits
the availability strictly to x86_64 until developers of other archs get
interested in enabling this feature.

Simply disabling hugepage migration on non-x86_64 archs is not enough to
fix the reported problem where sys_move_pages() hits the BUG_ON() in
follow_page(FOLL_GET), so let's fix this by checking whether hugepage
migration is supported in vma_migratable().

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reported-by: Michael Ellerman <mpe@ellerman.id.au>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: <stable@vger.kernel.org> [3.12+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7f39dda9d8
commit c177c81e09
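For context, the reported failure mode was a userspace page-migration request against a hugetlb mapping. Below is a minimal reproducer sketch using libnuma's move_pages(2) wrapper; the file name, 2MB mapping size, MAP_HUGETLB usage, and target node are illustrative assumptions, not taken from the commit. Build with `gcc repro.c -lnuma`.

/* repro.c — hypothetical reproducer sketch, not from the commit. */
#define _GNU_SOURCE
#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE; link with -lnuma */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 2UL << 20;	/* one 2MB hugepage; assumes x86_64 defaults */
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (addr == MAP_FAILED)
		return 1;
	*(volatile char *)addr = 1;	/* fault the hugepage in */

	void *pages[1] = { addr };
	int nodes[1] = { 1 };		/* assumed migration target node */
	int status[1] = { -1 };

	/*
	 * Before this patch, on an arch with untested hugepage migration,
	 * a request like this could reach follow_page(FOLL_GET) and hit
	 * its BUG_ON(); with the patch, vma_migratable() filters the
	 * hugetlb VMA out up front unless the arch has opted in.
	 */
	long ret = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
	printf("move_pages() = %ld, status[0] = %d\n", ret, status[0]);
	return 0;
}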
arch/arm/mm/hugetlbpage.c
@@ -56,8 +56,3 @@ int pmd_huge(pmd_t pmd)
 {
 	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
-
-int pmd_huge_support(void)
-{
-	return 1;
-}
arch/arm64/mm/hugetlbpage.c
@@ -58,11 +58,6 @@ int pud_huge(pud_t pud)
 #endif
 }
-
-int pmd_huge_support(void)
-{
-	return 1;
-}
 
 static __init int setup_hugepagesz(char *opt)
 {
 	unsigned long ps = memparse(opt, &opt);
arch/ia64/mm/hugetlbpage.c
@@ -114,11 +114,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
-
-int pmd_huge_support(void)
-{
-	return 0;
-}
 
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
 {
arch/metag/mm/hugetlbpage.c
@@ -110,11 +110,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
-
-int pmd_huge_support(void)
-{
-	return 1;
-}
 
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {
arch/mips/mm/hugetlbpage.c
@@ -84,11 +84,6 @@ int pud_huge(pud_t pud)
 	return (pud_val(pud) & _PAGE_HUGE) != 0;
 }
-
-int pmd_huge_support(void)
-{
-	return 1;
-}
 
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 		pmd_t *pmd, int write)
arch/powerpc/mm/hugetlbpage.c
@@ -86,11 +86,6 @@ int pgd_huge(pgd_t pgd)
 	 */
 	return ((pgd_val(pgd) & 0x3) != 0x0);
 }
-
-int pmd_huge_support(void)
-{
-	return 1;
-}
 #else
 int pmd_huge(pmd_t pmd)
 {
@@ -106,11 +101,6 @@ int pgd_huge(pgd_t pgd)
 {
 	return 0;
 }
-
-int pmd_huge_support(void)
-{
-	return 0;
-}
 #endif
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
arch/s390/mm/hugetlbpage.c
@@ -220,11 +220,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
-
-int pmd_huge_support(void)
-{
-	return 1;
-}
 
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmdp, int write)
 {
arch/sh/mm/hugetlbpage.c
@@ -83,11 +83,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
-
-int pmd_huge_support(void)
-{
-	return 0;
-}
 
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {
arch/sparc/mm/hugetlbpage.c
@@ -231,11 +231,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
-
-int pmd_huge_support(void)
-{
-	return 0;
-}
 
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {
arch/tile/mm/hugetlbpage.c
@@ -166,11 +166,6 @@ int pud_huge(pud_t pud)
 	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
 }
-
-int pmd_huge_support(void)
-{
-	return 1;
-}
 
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {
arch/x86/Kconfig
@@ -1873,6 +1873,10 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 	def_bool y
 	depends on X86_64 || X86_PAE
 
+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+	def_bool y
+	depends on X86_64 && HUGETLB_PAGE && MIGRATION
+
 menu "Power management and ACPI options"
 
 config ARCH_HIBERNATION_HEADER
arch/x86/mm/hugetlbpage.c
@@ -58,11 +58,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
 	return NULL;
 }
-
-int pmd_huge_support(void)
-{
-	return 0;
-}
 #else
 
 struct page *
@@ -80,11 +75,6 @@ int pud_huge(pud_t pud)
 {
 	return !!(pud_val(pud) & _PAGE_PSE);
 }
-
-int pmd_huge_support(void)
-{
-	return 1;
-}
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
include/linux/hugetlb.h
@@ -392,15 +392,13 @@ static inline pgoff_t basepage_index(struct page *page)
 
 extern void dissolve_free_huge_pages(unsigned long start_pfn,
 				     unsigned long end_pfn);
-int pmd_huge_support(void);
-/*
- * Currently hugepage migration is enabled only for pmd-based hugepage.
- * This function will be updated when hugepage migration is more widely
- * supported.
- */
 static inline int hugepage_migration_support(struct hstate *h)
 {
-	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+	return huge_page_shift(h) == PMD_SHIFT;
+#else
+	return 0;
+#endif
 }
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
@@ -450,7 +448,6 @@ static inline pgoff_t basepage_index(struct page *page)
 	return page->index;
 }
 #define dissolve_free_huge_pages(s, e)	do {} while (0)
-#define pmd_huge_support()	0
 #define hugepage_migration_support(h)	0
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
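To make the new semantics concrete: hugepage migration support is now a pure build-time property, true only when the architecture sets ARCH_ENABLE_HUGEPAGE_MIGRATION and the hstate is PMD-sized. The following is a standalone userspace model of that logic, not kernel code; PAGE_SHIFT=12 and PMD_SHIFT=21 are x86_64 assumptions, and struct hstate is a simplified stand-in. Compile with -DCONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION to mimic an opted-in arch.

/* model.c — standalone sketch of the new check, not kernel code. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4KB base pages, as on x86_64 */
#define PMD_SHIFT 21	/* 2MB pmd-level hugepages, as on x86_64 */

struct hstate { unsigned int order; };	/* simplified stand-in */

static unsigned int huge_page_shift(const struct hstate *h)
{
	/* mirrors the kernel's h->order + PAGE_SHIFT */
	return h->order + PAGE_SHIFT;
}

static int hugepage_migration_support(const struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;	/* pmd-sized hstates only */
#else
	return 0;	/* arch never opted in: no hugepage migration */
#endif
}

int main(void)
{
	struct hstate h_2mb = { .order = 9 };	/* shift 21: migratable if opted in */
	struct hstate h_1gb = { .order = 18 };	/* shift 30: never migratable */

	printf("2MB hstate: %d, 1GB hstate: %d\n",
	       hugepage_migration_support(&h_2mb),
	       hugepage_migration_support(&h_1gb));
	return 0;
}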
include/linux/mempolicy.h
@@ -175,6 +175,12 @@ static inline int vma_migratable(struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 		return 0;
+
+#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+	if (vma->vm_flags & VM_HUGETLB)
+		return 0;
+#endif
+
 	/*
 	 * Migration allocates pages in the highest zone. If we cannot
 	 * do so then migration (at least from node to node) is not
mm/Kconfig
@@ -267,6 +267,9 @@ config MIGRATION
 	  pages as migration can relocate pages to satisfy a huge page
 	  allocation instead of reclaiming.
 
+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+	boolean
+
 config PHYS_ADDR_T_64BIT
 	def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
 