/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>

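/*
 * Bookkeeping for virtual memmap backing allocations: each node records the
 * physical address of one backing allocation and the virtual address it
 * maps, linked through vmemmap_list (a summary of intent; the list is
 * maintained by the arch mm code at vmemmap populate time).
 */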
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

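/*
 * PMD tables here are "fragments" carved out of a larger page so several
 * tables can share one allocation. A hedged sketch of the contract, not a
 * spec: pmd_fragment_alloc() hands out the next free fragment (or a fresh
 * page) and pmd_fragment_free() drops a reference, freeing the page once
 * all of its fragments are gone.
 */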
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif
void pte_frag_destroy(void *pte_frag);

static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;

	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * kmem_cache_alloc() can return NULL under heavy memory contention;
	 * bail out before the memset() below dereferences a NULL pgd.
	 */
	if (unlikely(!pgd))
		return pgd;

	/*
	 * Don't scan the PGD for pointers, it contains references to PUDs
	 * but those references are not full pointers and so can't be
	 * recognised by kmemleak.
	 */
	kmemleak_no_scan(pgd);

	/*
	 * With hugetlb, we don't clear the second half of the page table.
	 * If we share the same slab cache with the pmd or pud level table,
	 * we need to make sure we zero out the full table on alloc.
	 * With 4K pages we don't store the slot in the second half, so we
	 * don't need to do this for 4K.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
#endif
	return pgd;
}

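/*
 * Sketch of the expected calling pattern (the real caller is the generic mm
 * code, e.g. mm_init() during fork; shown here only for illustration):
 *
 *	pgd_t *pgd = pgd_alloc(mm);
 *	if (!pgd)
 *		return -ENOMEM;
 *	mm->pgd = pgd;
 *	...
 *	pgd_free(mm, mm->pgd);
 */
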
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

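/*
 * The *_populate() helpers below all follow the same pattern: take the
 * kernel pointer to the lower-level table, convert it with
 * __pgtable_ptr_val() (effectively __pa() on this platform) and OR in the
 * valid bits for that level.
 */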
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	*pgd = __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;

	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Tell kmemleak to ignore the PUD, that means don't scan it for
	 * pointers and don't consider it a leak. PUDs are typically only
	 * referred to by their PGD, but kmemleak is not able to recognise
	 * those as pointers, leading to false leak reports.
	 */
	kmemleak_ignore(pud);

	return pud;
}

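/*
 * Note that pud_alloc_one() above does not check for a failed allocation
 * before calling kmemleak_ignore(); the kmemleak hooks tolerate a NULL
 * pointer, and the generic caller checks the returned pud.
 */
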
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	*pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	/*
	 * By now all the pud entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	/*
	 * By now all the pmd entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	*pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	*pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * By now all the pte entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}

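/*
 * direct_pages_count[] tracks how many pages of each supported size back
 * the kernel linear mapping. The counters appear to feed only proc
 * reporting, hence the IS_ENABLED(CONFIG_PROC_FS) guard below (an inference
 * from the guard, not a guarantee about every consumer).
 */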
extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
static inline void update_page_count(int psize, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[psize]);
}

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */