#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
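
/*
 * Note (added for clarity; not in the original header): each node on
 * vmemmap_list appears to record the physical page (phys) backing the
 * block of the virtual memmap at virt_addr, so that backing pages can
 * be looked up again later.
 */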
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
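
/*
 * Worked example (added here for illustration; not part of the original
 * header): pgtable_free_tlb() below packs a table's index size into the
 * low bits of its pointer, and __tlb_remove_table() unpacks it:
 *
 *	pgf   = (unsigned long)table | shift;
 *	table = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);
 *	shift = pgf & MAX_PGTABLE_INDEX_SIZE;
 *
 * This round-trips only because every pagetable is aligned to at least
 * MAX_PGTABLE_INDEX_SIZE + 1 (16) bytes, leaving the low 4 bits zero.
 */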

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) pgtable_cache[shift]

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, (unsigned long)PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
			pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, (unsigned long)page_address(pte_page));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
			pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
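
/*
 * Note (added for clarity; not in the original header): pte_alloc_one()
 * below pairs pgtable_page_ctor() with the pgtable_page_dtor() calls in
 * pte_free() and pgtable_free(), which set up and tear down the struct
 * page state for user PTE pages (e.g. the split page-table lock).
 * Kernel PTE pages, allocated above, skip the constructor.
 */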
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	struct page *page;
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}

static inline void pgtable_free(void *table, int shift)
{
	if (!shift) {
		pgtable_page_dtor(virt_to_page(table));
		free_page((unsigned long)table);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}

#define get_hugepd_cache_index(x)	(x)
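
/*
 * Note (added for clarity; not in the original header): under SMP the
 * free is deferred through tlb_remove_table(), so that concurrent
 * lockless page-table walkers cannot observe a table being reused.
 * The index size is stashed in the low bits of the pointer (see
 * MAX_PGTABLE_INDEX_SIZE above) and recovered in __tlb_remove_table().
 */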
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}

#else
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	pgtable_free(table, shift);
}
#endif
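
/*
 * Note (from the commit history, kept here for context): 64-bit "BookE"
 * hardware table walkers can load "indirect" TLB entries covering the
 * whole range mapped by a PTE page, so the virtual address is needed to
 * invalidate them (via tlb_flush_pgtable()) when that PTE page is freed.
 */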
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_free_tlb(tlb, page_address(table), 0);
}

#define __pmd_free_tlb(tlb, pmd, addr)		      \
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr)		      \
	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)

#endif /* CONFIG_PPC_64K_PAGES */

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */