// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org>
 *
 * This file implements the EFI boot stub for the arm64 kernel.
 * Adapted from ARM version by Mark Salter <msalter@redhat.com>
 */

#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/sysreg.h>

#include "efistub.h"

efi_status_t check_platform_features(void)
{
	u64 tg;

	/* UEFI mandates support for 4 KB granularity, no need to check */
	if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
		return EFI_SUCCESS;
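
	/*
	 * Note: asm/sysreg.h selects the ID_AA64MMFR0_TGRAN field offset and
	 * the SUPPORTED_MIN/MAX bounds to match the configured page size, so
	 * the same 4-bit field extraction below covers both the 16 KB and
	 * 64 KB granule configurations.
	 */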
	tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
	if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
		if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
			efi_err("This 64 KB granular kernel is not supported by your CPU\n");
		else
			efi_err("This 16 KB granular kernel is not supported by your CPU\n");
		return EFI_UNSUPPORTED;
	}

	return EFI_SUCCESS;
}

/*
 * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
 * to provide space, and fail to zero it). Check for this condition by double
 * checking that the first and the last byte of the image are covered by the
 * same EFI memory map entry.
 */
static bool check_image_region(u64 base, u64 size)
{
	unsigned long map_size, desc_size, buff_size;
	efi_memory_desc_t *memory_map;
	struct efi_boot_memmap map;
	efi_status_t status;
	bool ret = false;
	int map_offset;
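
	/*
	 * efi_get_memory_map() fills in the buffers wired up via the struct
	 * below and allocates the descriptor array from pool memory, so the
	 * map must be released with free_pool once we are done scanning it.
	 */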
	map.map = &memory_map;
	map.map_size = &map_size;
	map.desc_size = &desc_size;
	map.desc_ver = NULL;
	map.key_ptr = NULL;
	map.buff_size = &buff_size;

	status = efi_get_memory_map(&map);
	if (status != EFI_SUCCESS)
		return false;

	for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
		efi_memory_desc_t *md = (void *)memory_map + map_offset;
		u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;

		/*
		 * Find the region that covers base, and return whether
		 * it covers base+size bytes.
		 */
		if (base >= md->phys_addr && base < end) {
			ret = (base + size) <= end;
			break;
		}
	}

	efi_bs_call(free_pool, memory_map);

	return ret;
}

efi_status_t handle_kernel_image(unsigned long *image_addr,
				 unsigned long *image_size,
				 unsigned long *reserve_addr,
				 unsigned long *reserve_size,
				 efi_loaded_image_t *image)
{
	efi_status_t status;
	unsigned long kernel_size, kernel_memsize = 0;
	u32 phys_seed = 0;

	/*
	 * Although relocatable kernels can fix up the misalignment with
	 * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
	 * subtly out of sync with those recorded in the vmlinux when KASLR is
	 * disabled but the image required relocation anyway. Therefore, retain
	 * 2M alignment if KASLR was explicitly disabled, even if it was not
	 * going to be activated to begin with.
	 */
	u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
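
	/*
	 * Note: MIN_KIMG_ALIGN is 2 MiB on arm64, while EFI_KIMG_ALIGN is
	 * usually smaller (the kernel's segment alignment, e.g. 64 KiB), so
	 * explicitly disabling KASLR imposes the stricter placement
	 * requirement here.
	 */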

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		if (!efi_nokaslr) {
			status = efi_get_random_bytes(sizeof(phys_seed),
						      (u8 *)&phys_seed);
			if (status == EFI_NOT_FOUND) {
				efi_info("EFI_RNG_PROTOCOL unavailable\n");
				efi_nokaslr = true;
			} else if (status != EFI_SUCCESS) {
				efi_err("efi_get_random_bytes() failed (0x%lx)\n",
					status);
				efi_nokaslr = true;
			}
		} else {
			efi_info("KASLR disabled on kernel command line\n");
		}
	}
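
	/*
	 * From here on, phys_seed is nonzero only if the firmware RNG
	 * actually produced entropy; a zero value (including the unlikely
	 * all-zeroes random draw) selects the non-randomized path below.
	 */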

	if (image->image_base != _text)
		efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");

	if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
		efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
			EFI_KIMG_ALIGN >> 10);
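
	/*
	 * kernel_size covers the file image proper (_text to _edata);
	 * kernel_memsize additionally covers the BSS (_edata to _end), which
	 * occupies no space in the image but must still be reserved.
	 */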
	kernel_size = _edata - _text;
	kernel_memsize = kernel_size + (_end - _edata);
	*reserve_size = kernel_memsize;

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
		/*
		 * If KASLR is enabled, and we have some randomness available,
		 * locate the kernel at a randomized offset in physical memory.
		 */
		status = efi_random_alloc(*reserve_size, min_kimg_align,
					  reserve_addr, phys_seed);
		if (status != EFI_SUCCESS)
			efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
	} else {
		status = EFI_OUT_OF_RESOURCES;
	}
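
	/*
	 * Fallback ladder if no randomized allocation was made: flag a
	 * firmware bug if the BSS was not covered by the loader's allocation,
	 * run in place if the load address is already aligned well enough,
	 * and otherwise relocate to a freshly allocated, suitably aligned
	 * buffer.
	 */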
	if (status != EFI_SUCCESS) {
		if (!check_image_region((u64)_text, kernel_memsize)) {
			efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
		} else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
			/*
			 * Just execute from wherever we were loaded by the
			 * UEFI PE/COFF loader if the alignment is suitable.
			 */
			*image_addr = (u64)_text;
			*reserve_size = 0;
			return EFI_SUCCESS;
		}

		status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
						    ULONG_MAX, min_kimg_align);

		if (status != EFI_SUCCESS) {
			efi_err("Failed to relocate kernel\n");
			*reserve_size = 0;
			return status;
		}
	}
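
	/*
	 * Only the file image is copied; the BSS portion of the reservation
	 * is left untouched here and is zeroed by the kernel's own early
	 * boot code.
	 */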
	*image_addr = *reserve_addr;
	memcpy((void *)*image_addr, _text, kernel_size);

	return EFI_SUCCESS;
}