/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

u64 kimage_voffset __read_mostly;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

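/*
 * Statically allocated page tables for the fixmap; bm_pmd is reused by
 * map_kernel() when the fixmap shares a top-level entry with the kernel
 * mapping.
 */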
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

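/*
 * Allocate a zeroed page from memblock for use as a page table, returning
 * its physical address.
 */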
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

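/*
 * Install PTEs covering [addr, end), starting at the given pfn. A new PTE
 * table is allocated via pgtable_alloc() if the pmd entry is empty or is
 * currently a section mapping.
 */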
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

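/*
 * remap a PUD into PMD-sized block mappings
 */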
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
{
	/*
	 * If debug_page_alloc is enabled we must map the linear map
	 * using pages. However, other mappings created by
	 * create_mapping_noalloc must use sections in some cases. Allow
	 * sections to be used in those cases, where no pgtable_alloc
	 * function is provided.
	 */
	return !pgtable_alloc || !debug_pagealloc_enabled();
}
#else
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
{
	return true;
}
#endif

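/*
 * Fill in the PMD entries covering [addr, end), using section mappings
 * where addr, next and phys are all section-aligned (and block mappings
 * are allowed), and falling back to alloc_init_pte() otherwise.
 */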
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		      block_mappings_allowed(pgtable_alloc)) {
			pmd_t old_pmd = *pmd;
			pmd_set_huge(pmd, phys, prot);
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

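/*
 * A 1GB (PUD-level) block mapping may only be used with the 4K granule,
 * and only when addr, next and phys are all 1GB-aligned.
 */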
static inline bool use_1G_block(unsigned long addr, unsigned long next,
			unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

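/*
 * Fill in the PUD entries covering [addr, end), using 1GB block mappings
 * where possible and handing the rest down to alloc_init_pmd().
 */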
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    block_mappings_allowed(pgtable_alloc)) {
			pud_t old_pud = *pud;
			pud_set_huge(pud, phys, prot);

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = pud_page_paddr(old_pud);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * requested mapping of [phys, phys + size) at the virtual address virt.
 */
static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
		     phys_addr_t size, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

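/*
 * Allocate a zeroed page table page from the page allocator, for use once
 * the core allocators are up, and return its physical address.
 */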
static phys_addr_t late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

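/*
 * Create a mapping in the page tables rooted at pgdir, using 'alloc' to
 * provide any levels of table that need to be allocated. Callers pass
 * early_pgtable_alloc(), late_pgtable_alloc(), or NULL (no allocation
 * permitted) depending on how early the mapping is created.
 */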
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*alloc)(void))
{
	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     NULL);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

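/*
 * Map a memblock region into the linear mapping, mapping the linear alias
 * of the [_stext, _etext) interval read-only so the kernel image cannot be
 * inadvertently modified or executed via the linear map.
 */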
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
	unsigned long kernel_start = __pa(_stext);
	unsigned long kernel_end = __pa(_etext);

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 */

	/* No overlap with the kernel text */
	if (end < kernel_start || start >= kernel_end) {
		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
				     end - start, PAGE_KERNEL,
				     early_pgtable_alloc);
		return;
	}

	/*
	 * This block overlaps the kernel text mapping.
	 * Map the portion(s) which don't overlap.
	 */
	if (start < kernel_start)
		__create_pgd_mapping(pgd, start,
				     __phys_to_virt(start),
				     kernel_start - start, PAGE_KERNEL,
				     early_pgtable_alloc);
	if (kernel_end < end)
		__create_pgd_mapping(pgd, kernel_end,
				     __phys_to_virt(kernel_end),
				     end - kernel_end, PAGE_KERNEL,
				     early_pgtable_alloc);

	/*
	 * Map the linear alias of the [_stext, _etext) interval as
	 * read-only/non-executable. This makes the contents of the
	 * region accessible to subsystems such as hibernate, but
	 * protects it from inadvertent modification or execution.
	 */
	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
			     kernel_end - kernel_start, PAGE_KERNEL_RO,
			     early_pgtable_alloc);
}

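/*
 * Map all usable memblock regions into the linear mapping of the page
 * tables rooted at pgd.
 */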
static void __init map_mem(pgd_t *pgd)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgd, start, end);
	}
}

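/*
 * Make the kernel text read-only/executable and .rodata read-only.
 */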
void mark_rodata_ro(void)
{
	unsigned long section_size;

	section_size = (unsigned long)__start_rodata - (unsigned long)_stext;
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
			    section_size, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use _etext rather than __end_rodata to
	 * cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)_etext - (unsigned long)__start_rodata;
	create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);
}

void fixup_init(void)
{
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

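/*
 * Map a chunk of the kernel image ([va_start, va_end)) with the given
 * permissions, and record it as an early vm_struct via vm_area_add_early().
 */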
static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
				    pgprot_t prot, struct vm_struct *vma)
{
	phys_addr_t pa_start = __pa(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc);

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
	vma->size	= size;
	vma->flags	= VM_MAP;
	vma->caller	= __builtin_return_address(0);

	vm_area_add_early(vma);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;

	map_kernel_chunk(pgd, _stext, __start_rodata, PAGE_KERNEL_EXEC, &vmlinux_text);
	map_kernel_chunk(pgd, __start_rodata, _etext, PAGE_KERNEL, &vmlinux_rodata);
	map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
			 &vmlinux_init);
	map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);

	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
			*pgd_offset_k(FIXADDR_START));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
			__pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
		pud_clear_fixmap();
	} else {
		BUG();
	}
|
2016-01-25 11:45:12 +00:00
|
|
|
|
|
|
|
kasan_copy_shadow(pgd);
|
|
|
|
}
|
|
|
|
|
2012-03-05 11:49:27 +00:00
|
|
|
/*
|
|
|
|
* paging_init() sets up the page tables, initialises the zone memory
|
|
|
|
* maps and sets up the zero page.
|
|
|
|
*/
|
|
|
|
void __init paging_init(void)
|
|
|
|
{
|
2016-01-25 11:45:12 +00:00
|
|
|
phys_addr_t pgd_phys = early_pgtable_alloc();
|
|
|
|
pgd_t *pgd = pgd_set_fixmap(pgd_phys);
|
|
|
|
|
|
|
|
map_kernel(pgd);
|
|
|
|
map_mem(pgd);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We want to reuse the original swapper_pg_dir so we don't have to
|
|
|
|
* communicate the new address to non-coherent secondaries in
|
|
|
|
* secondary_entry, and so cpu_switch_mm can generate the address with
|
|
|
|
* adrp+add rather than a load from some global variable.
|
|
|
|
*
|
|
|
|
* To do this we need to go via a temporary pgd.
|
|
|
|
*/
|
|
|
|
cpu_replace_ttbr1(__va(pgd_phys));
|
|
|
|
memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
|
|
|
|
cpu_replace_ttbr1(swapper_pg_dir);
|
|
|
|
|
|
|
|
pgd_clear_fixmap();
|
|
|
|
memblock_free(pgd_phys, PAGE_SIZE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
|
|
|
|
* allocated with it.
|
|
|
|
*/
|
|
|
|
memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
|
|
|
|
SWAPPER_DIR_SIZE - PAGE_SIZE);
|
2012-03-05 11:49:27 +00:00
|
|
|
|
|
|
|
bootmem_init();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check whether a kernel address is valid (derived from arch/x86/).
|
|
|
|
*/
|
|
|
|
int kern_addr_valid(unsigned long addr)
|
|
|
|
{
|
|
|
|
pgd_t *pgd;
|
|
|
|
pud_t *pud;
|
|
|
|
pmd_t *pmd;
|
|
|
|
pte_t *pte;
|
|
|
|
|
|
|
|
if ((((long)addr) >> VA_BITS) != -1UL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
pgd = pgd_offset_k(addr);
|
|
|
|
if (pgd_none(*pgd))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
pud = pud_offset(pgd, addr);
|
|
|
|
if (pud_none(*pud))
|
|
|
|
return 0;
|
|
|
|
|
2014-05-06 13:02:27 +00:00
|
|
|
if (pud_sect(*pud))
|
|
|
|
return pfn_valid(pud_pfn(*pud));
|
|
|
|
|
2012-03-05 11:49:27 +00:00
|
|
|
pmd = pmd_offset(pud, addr);
|
|
|
|
if (pmd_none(*pmd))
|
|
|
|
return 0;
|
|
|
|
|
2014-04-15 17:53:24 +00:00
|
|
|
if (pmd_sect(*pmd))
|
|
|
|
return pfn_valid(pmd_pfn(*pmd));
|
|
|
|
|
2012-03-05 11:49:27 +00:00
|
|
|
pte = pte_offset_kernel(pmd, addr);
|
|
|
|
if (pte_none(*pte))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return pfn_valid(pte_pfn(*pte));
|
|
|
|
}
|
|
|
|
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
2015-10-19 13:19:28 +00:00
|
|
|
#if !ARM64_SWAPPER_USES_SECTION_MAPS
|
2013-04-29 22:07:50 +00:00
|
|
|
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
|
2012-03-05 11:49:27 +00:00
|
|
|
{
|
2013-04-29 22:07:50 +00:00
|
|
|
return vmemmap_populate_basepages(start, end, node);
|
2012-03-05 11:49:27 +00:00
|
|
|
}
|
2015-10-19 13:19:28 +00:00
|
|
|
#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
|
2013-04-29 22:07:50 +00:00
|
|
|
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
|
2012-03-05 11:49:27 +00:00
|
|
|
{
|
2013-04-29 22:07:50 +00:00
|
|
|
unsigned long addr = start;
|
2012-03-05 11:49:27 +00:00
|
|
|
unsigned long next;
|
|
|
|
pgd_t *pgd;
|
|
|
|
pud_t *pud;
|
|
|
|
pmd_t *pmd;
|
|
|
|
|
|
|
|
do {
|
|
|
|
next = pmd_addr_end(addr, end);
|
|
|
|
|
|
|
|
pgd = vmemmap_pgd_populate(addr, node);
|
|
|
|
if (!pgd)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
pud = vmemmap_pud_populate(pgd, addr, node);
|
|
|
|
if (!pud)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
pmd = pmd_offset(pud, addr);
|
|
|
|
if (pmd_none(*pmd)) {
|
|
|
|
void *p = NULL;
|
|
|
|
|
|
|
|
p = vmemmap_alloc_block_buf(PMD_SIZE, node);
|
|
|
|
if (!p)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2014-04-03 14:57:15 +00:00
|
|
|
set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
|
2012-03-05 11:49:27 +00:00
|
|
|
} else
|
|
|
|
vmemmap_verify((pte_t *)pmd, node, addr, next);
|
|
|
|
} while (addr = next, addr != end);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
|
2013-04-29 22:07:50 +00:00
|
|
|
void vmemmap_free(unsigned long start, unsigned long end)
|
2013-02-23 00:33:08 +00:00
|
|
|
{
|
|
|
|
}
|
2012-03-05 11:49:27 +00:00
|
|
|
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
|
2014-11-21 21:50:42 +00:00
|
|
|
|
|
|
|
static inline pud_t * fixmap_pud(unsigned long addr)
|
|
|
|
{
|
|
|
|
pgd_t *pgd = pgd_offset_k(addr);
|
|
|
|
|
|
|
|
BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
|
|
|
|
|
2016-02-16 12:52:38 +00:00
|
|
|
return pud_offset_kimg(pgd, addr);
|
2014-11-21 21:50:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline pmd_t * fixmap_pmd(unsigned long addr)
|
|
|
|
{
|
|
|
|
pud_t *pud = fixmap_pud(addr);
|
|
|
|
|
|
|
|
BUG_ON(pud_none(*pud) || pud_bad(*pud));
|
|
|
|
|
2016-02-16 12:52:38 +00:00
|
|
|
return pmd_offset_kimg(pud, addr);
|
2014-11-21 21:50:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline pte_t * fixmap_pte(unsigned long addr)
|
|
|
|
{
|
2016-02-16 12:52:38 +00:00
|
|
|
return &bm_pte[pte_index(addr)];
|
2014-11-21 21:50:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void __init early_fixmap_init(void)
|
|
|
|
{
|
|
|
|
pgd_t *pgd;
|
|
|
|
pud_t *pud;
|
|
|
|
pmd_t *pmd;
|
|
|
|
unsigned long addr = FIXADDR_START;
|
|
|
|
|
|
|
|
pgd = pgd_offset_k(addr);
|
arm64: add support for kernel ASLR
This adds support for KASLR, based on entropy provided by
the bootloader in the /chosen/kaslr-seed DT property. Depending on the size
of the address space (VA_BITS) and the page size, the entropy in the
virtual displacement is up to 13 bits (16k/2 levels) and up to 25 bits (all
4 levels), with the sidenote that displacements that result in the kernel
image straddling a 1GB/32MB/512MB alignment boundary (for 4KB/16KB/64KB
granule kernels, respectively) are not allowed, and will be rounded up to
an acceptable value.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is enabled, the module region is
randomized independently from the core kernel. This makes it less likely
that the location of core kernel data structures can be determined by an
adversary, but causes all function calls from modules into the core kernel
to be resolved via entries in the module PLTs.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is not enabled, the module region is
randomized by choosing a page aligned 128 MB region inside the interval
[_etext - 128 MB, _stext + 128 MB). This gives between 10 and 14 bits of
entropy (depending on page size), independently of the kernel randomization,
but still guarantees that modules are within the range of relative branch
and jump instructions (with the caveat that, since the module region is
shared with other uses of the vmalloc area, modules may need to be loaded
further away if the module region is exhausted).
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2016-01-26 13:12:01 +00:00
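To make the non-FULL module-region arithmetic described above concrete, here is a standalone sketch (plain C; the function and macro names are illustrative, not the kernel's kaslr code): a page-aligned 128 MB window is chosen inside [_etext - 128 MB, _stext + 128 MB), which keeps every kernel text address within relative-branch range of every module address.

#include <stdint.h>

#define SZ_128M		(128ULL << 20)
#define PAGE_SZ		4096ULL		/* assumption: 4 KB granule */

/* Pick the base of a 128 MB module window from a seed (sketch only). */
static uint64_t pick_module_base(uint64_t seed, uint64_t stext, uint64_t etext)
{
	/* valid bases span [_etext - 128 MB, _stext), i.e. 128 MB minus the text size */
	uint64_t range = SZ_128M - (etext - stext);
	uint64_t offset = (seed % range) & ~(PAGE_SZ - 1);	/* page aligned */

	return (etext - SZ_128M) + offset;	/* window is [base, base + 128 MB) */
}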
|
|
|
if (CONFIG_PGTABLE_LEVELS > 3 &&
|
|
|
|
!(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
|
2016-02-16 12:52:40 +00:00
|
|
|
/*
|
|
|
|
* We only end up here if the kernel mapping and the fixmap
|
|
|
|
* share the top level pgd entry, which should only happen on
|
|
|
|
* 16k/4 levels configurations.
|
|
|
|
*/
|
|
|
|
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
|
|
|
|
pud = pud_offset_kimg(pgd, addr);
|
|
|
|
} else {
|
|
|
|
pgd_populate(&init_mm, pgd, bm_pud);
|
|
|
|
pud = fixmap_pud(addr);
|
|
|
|
}
|
2014-11-21 21:50:42 +00:00
|
|
|
pud_populate(&init_mm, pud, bm_pmd);
|
2016-02-16 12:52:38 +00:00
|
|
|
pmd = fixmap_pmd(addr);
|
2014-11-21 21:50:42 +00:00
|
|
|
pmd_populate_kernel(&init_mm, pmd, bm_pte);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The boot-ioremap range spans multiple pmds, for which
|
2016-02-16 12:52:38 +00:00
|
|
|
* we are not prepared:
|
2014-11-21 21:50:42 +00:00
|
|
|
*/
|
|
|
|
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
|
|
|
|
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
|
|
|
|
|
|
|
|
if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
|
|
|
|
|| pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
|
|
|
|
WARN_ON(1);
|
|
|
|
pr_warn("pmd %p != %p, %p\n",
|
|
|
|
pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
|
|
|
|
fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
|
|
|
|
pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
|
|
|
|
fix_to_virt(FIX_BTMAP_BEGIN));
|
|
|
|
pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
|
|
|
|
fix_to_virt(FIX_BTMAP_END));
|
|
|
|
|
|
|
|
pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
|
|
|
|
pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void __set_fixmap(enum fixed_addresses idx,
|
|
|
|
phys_addr_t phys, pgprot_t flags)
|
|
|
|
{
|
|
|
|
unsigned long addr = __fix_to_virt(idx);
|
|
|
|
pte_t *pte;
|
|
|
|
|
2015-03-04 13:27:35 +00:00
|
|
|
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
|
2014-11-21 21:50:42 +00:00
|
|
|
|
|
|
|
pte = fixmap_pte(addr);
|
|
|
|
|
|
|
|
if (pgprot_val(flags)) {
|
|
|
|
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
|
|
|
|
} else {
|
|
|
|
pte_clear(&init_mm, addr, pte);
|
|
|
|
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
|
|
|
|
}
|
|
|
|
}
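A brief usage sketch for __set_fixmap() (hedged: FIX_EARLYCON_MEM_BASE and FIXMAP_PAGE_IO are the names an earlycon-style caller would typically use, but treat the exact identifiers here as assumptions):

/* Map a UART page into the earlycon fixmap slot and return a usable pointer. */
static void __iomem *early_map_uart(phys_addr_t uart_phys)
{
	__set_fixmap(FIX_EARLYCON_MEM_BASE, uart_phys & PAGE_MASK, FIXMAP_PAGE_IO);
	return (void __iomem *)(fix_to_virt(FIX_EARLYCON_MEM_BASE) +
				(uart_phys & ~PAGE_MASK));
}

/* Passing an empty pgprot clears the PTE and flushes the stale TLB entry. */
static void early_unmap_uart(void)
{
	__set_fixmap(FIX_EARLYCON_MEM_BASE, 0, __pgprot(0));
}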
|
2015-06-01 11:40:32 +00:00
|
|
|
|
2016-01-26 13:12:01 +00:00
|
|
|
void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
|
2015-06-01 11:40:32 +00:00
|
|
|
{
|
|
|
|
const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
|
2016-01-26 13:12:01 +00:00
|
|
|
int offset;
|
2015-06-01 11:40:32 +00:00
|
|
|
void *dt_virt;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check whether the physical FDT address is set and meets the minimum
|
|
|
|
* alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
|
|
|
|
* at least 8 bytes so that we can always access the size field of the
|
|
|
|
* FDT header after mapping the first chunk, double check here if that
|
|
|
|
* is indeed the case.
|
|
|
|
*/
|
|
|
|
BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
|
|
|
|
if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure that the FDT region can be mapped without the need to
|
|
|
|
* allocate additional translation table pages, so that it is safe
|
2016-02-06 00:24:46 +00:00
|
|
|
* to call create_mapping_noalloc() this early.
|
2015-06-01 11:40:32 +00:00
|
|
|
*
|
|
|
|
* On 64k pages, the FDT will be mapped using PTEs, so we need to
|
|
|
|
* be in the same PMD as the rest of the fixmap.
|
|
|
|
* On 4k pages, we'll use section mappings for the FDT so we only
|
|
|
|
* have to be in the same PUD.
|
|
|
|
*/
|
|
|
|
BUILD_BUG_ON(dt_virt_base % SZ_2M);
|
|
|
|
|
2015-10-19 13:19:28 +00:00
|
|
|
BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
|
|
|
|
__fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
|
2015-06-01 11:40:32 +00:00
|
|
|
|
2015-10-19 13:19:28 +00:00
|
|
|
offset = dt_phys % SWAPPER_BLOCK_SIZE;
|
2015-06-01 11:40:32 +00:00
|
|
|
dt_virt = (void *)dt_virt_base + offset;
|
|
|
|
|
|
|
|
/* map the first chunk so we can read the size from the header */
|
2016-02-06 00:24:46 +00:00
|
|
|
create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
|
|
|
|
dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
|
2015-06-01 11:40:32 +00:00
|
|
|
|
|
|
|
if (fdt_check_header(dt_virt) != 0)
|
|
|
|
return NULL;
|
|
|
|
|
2016-01-26 13:12:01 +00:00
|
|
|
*size = fdt_totalsize(dt_virt);
|
|
|
|
if (*size > MAX_FDT_SIZE)
|
2015-06-01 11:40:32 +00:00
|
|
|
return NULL;
|
|
|
|
|
2016-01-26 13:12:01 +00:00
|
|
|
if (offset + *size > SWAPPER_BLOCK_SIZE)
|
2016-02-06 00:24:46 +00:00
|
|
|
create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
|
2016-01-26 13:12:01 +00:00
|
|
|
round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
|
2015-06-01 11:40:32 +00:00
|
|
|
|
2016-01-26 13:12:01 +00:00
|
|
|
return dt_virt;
|
|
|
|
}
|
2015-06-01 11:40:32 +00:00
|
|
|
|
2016-01-26 13:12:01 +00:00
|
|
|
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
|
|
|
|
{
|
|
|
|
void *dt_virt;
|
|
|
|
int size;
|
|
|
|
|
|
|
|
dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
|
|
|
|
if (!dt_virt)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
memblock_reserve(dt_phys, size);
|
2015-06-01 11:40:32 +00:00
|
|
|
return dt_virt;
|
|
|
|
}
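A hedged sketch of how an early-boot caller might consume fixmap_remap_fdt() (the wrapper below is illustrative; the real caller is the arch setup code):

/* Map the FDT via the fixmap and fail loudly if the blob is unusable. */
static void __init sketch_early_fdt_probe(phys_addr_t dt_phys)
{
	void *fdt = fixmap_remap_fdt(dt_phys);

	/* NULL covers a missing address, misalignment, an oversized blob,
	 * or a failed fdt_check_header() */
	if (!fdt)
		panic("invalid device tree blob at physical address %pa\n", &dt_phys);

	/* from here the blob can be parsed with libfdt, e.g.
	 * fdt_path_offset(fdt, "/chosen") */
}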
|
2016-02-16 12:52:35 +00:00
|
|
|
|
|
|
|
int __init arch_ioremap_pud_supported(void)
|
|
|
|
{
|
|
|
|
/* only 4k granule supports level 1 block mappings */
|
|
|
|
return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
|
|
|
|
}
|
|
|
|
|
|
|
|
int __init arch_ioremap_pmd_supported(void)
|
|
|
|
{
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
|
|
|
|
{
|
|
|
|
BUG_ON(phys & ~PUD_MASK);
|
|
|
|
set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
|
|
|
|
{
|
|
|
|
BUG_ON(phys & ~PMD_MASK);
|
|
|
|
set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int pud_clear_huge(pud_t *pud)
|
|
|
|
{
|
|
|
|
if (!pud_sect(*pud))
|
|
|
|
return 0;
|
|
|
|
pud_clear(pud);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int pmd_clear_huge(pmd_t *pmd)
|
|
|
|
{
|
|
|
|
if (!pmd_sect(*pmd))
|
|
|
|
return 0;
|
|
|
|
pmd_clear(pmd);
|
|
|
|
return 1;
|
|
|
|
}
|