ARC: mm: switch pgtable_t back to struct page *

So far, ARC's pgtable_t has not been struct page based, in order to avoid the
extra page_address() calls involved. However, the difference is down to
noise and it gets in the way of using generic code, hence this patch.
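
Concretely, the asm/page.h side of the switch (one of the changed files, not in
the hunks shown below) amounts to roughly this, sketched here for illustration:

	-typedef pte_t * pgtable_t;		/* old: bare kernel-virtual pointer to the PTE table */
	+typedef struct page *pgtable_t;	/* new: struct page based, as on most architectures */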

This also allows us to reuse the generic THP deposit/withdraw code.
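
For context (a condensed sketch of the generic helper in mm/pgtable-generic.c
around this kernel version, not part of this diff): the generic deposit code
chains deposited PTE tables through the struct page itself, which is why a
struct page based pgtable_t is a prerequisite for reusing it:

	void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					pgtable_t pgtable)
	{
		assert_spin_locked(pmd_lockptr(mm, pmdp));

		/* FIFO: link the deposited table into the list anchored at the pmd */
		if (!pmd_huge_pte(mm, pmdp))
			INIT_LIST_HEAD(&pgtable->lru);
		else
			list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
		pmd_huge_pte(mm, pmdp) = pgtable;
	}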

There is an additional consideration for PGDIR_SHIFT in the 4K page config:
with each page table now only PAGE_SIZE deep, the address split can no longer
be entirely arbitrary.
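
To make that concrete (illustrative arithmetic only, assuming 4K pages and
32-bit PTEs; not part of the patch):

	/*
	 * A PTE table is now exactly one page, so the number of bits translated
	 * at the last level is fixed by the page size rather than tunable:
	 *
	 *   PTRS_PER_PTE = PAGE_SIZE / sizeof(pte_t) = 4096 / 4 = 1024  -> 10 bits
	 *   => PGDIR_SHIFT = PAGE_SHIFT + 10 = 12 + 10 = 22
	 *
	 * i.e. the last-level table must fit in a single page:
	 */
	BUILD_BUG_ON(PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);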

Tested-by: kernel test robot <lkp@intel.com>
Suggested-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
Author: Vineet Gupta <vgupta@kernel.org>
Date:   2021-08-12 12:54:43 -07:00
Commit: d9820ff76f (parent: f35534a2bc)

6 changed files with 28 additions and 87 deletions

arch/arc/include/asm/pgalloc.h:

@@ -45,22 +45,17 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 	set_pmd(pmd, __pmd((unsigned long)pte));
 }
 
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
 {
-	set_pmd(pmd, __pmd((unsigned long)pte));
-}
-
-static inline int __get_order_pgd(void)
-{
-	return get_order(PTRS_PER_PGD * sizeof(pgd_t));
+	set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	int num, num2;
-	pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+	pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
 
 	if (ret) {
+		int num, num2;
 		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
 		memzero(ret, num * sizeof(pgd_t));
 
@@ -76,61 +71,43 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	free_pages((unsigned long)pgd, __get_order_pgd());
-}
-
-/*
- * With software-only page-tables, addr-split for traversal is tweakable and
- * that directly governs how big tables would be at each level.
- * Further, the MMU page size is configurable.
- * Thus we need to programatically assert the size constraint
- * All of this is const math, allowing gcc to do constant folding/propagation.
- */
-
-static inline int __get_order_pte(void)
-{
-	return get_order(PTRS_PER_PTE * sizeof(pte_t));
+	free_page((unsigned long)pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	pte_t *pte;
 
-	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					 __get_order_pte());
+	pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_ZERO);
 
 	return pte;
 }
 
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	pgtable_t pte_pg;
 	struct page *page;
 
-	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
-	if (!pte_pg)
-		return 0;
-	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
-	page = virt_to_page(pte_pg);
+	page = (pgtable_t)alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
+	if (!page)
+		return NULL;
+
 	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
-		return 0;
+		return NULL;
 	}
 
-	return pte_pg;
+	return page;
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+	free_page((unsigned long)pte);
 }
 
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte_page)
 {
-	pgtable_pte_page_dtor(virt_to_page(ptep));
-	free_pages((unsigned long)ptep, __get_order_pte());
+	pgtable_pte_page_dtor(pte_page);
+	__free_page(pte_page);
 }
 
 #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
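
For comparison, the generic helper that pte_alloc_one() now lines up with
(quoted approximately from include/asm-generic/pgalloc.h of that era; treat
this as a sketch rather than the exact source):

	#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
	#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

	static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
	{
		struct page *pte;

		pte = alloc_page(gfp);
		if (!pte)
			return NULL;
		if (!pgtable_pte_page_ctor(pte)) {
			__free_page(pte);
			return NULL;
		}

		return pte;
	}

With pgtable_t now a struct page *, ARC's pte_alloc_one()/pte_free() become a
near drop-in for these generic versions, which is what the changelog means by
no longer getting in the way of using generic code.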