arm64: kexec: move relocation function setup
Currently, the kernel relocation function is set up in machine_kexec(), at kexec reboot time, using control_code_page. This work is more logically done at kexec load time, so remove it from the reboot path and move the setup into the newly added machine_kexec_post_load().

Once the MMU is enabled, the kexec control page will contain more than just the relocation code; it will also hold a vector table. Therefore, add a pointer to the actual function within this page, arch.kern_reloc. For now it equals the beginning of the page; offsets will be added later, when the vector table is introduced.

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: James Morse <james.morse@arm.com>
Link: https://lore.kernel.org/r/20210125191923.1060122-10-pasha.tatashin@soleen.com
Signed-off-by: Will Deacon <will@kernel.org>
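For readers coming from the generic kexec code: machine_kexec_post_load() is an arch hook with a weak no-op default, invoked once the image segments have been loaded, i.e. on the kexec_load/kexec_file_load path rather than at reboot. A rough, condensed sketch of that wiring follows (hedged, not a verbatim copy of kernel/kexec_core.c; the wrapper name is made up for illustration):

    #include <linux/kexec.h>

    /* Weak default in the generic code: architectures with no load-time
     * work simply fall through to this. arm64 now overrides it. */
    int __weak machine_kexec_post_load(struct kimage *image)
    {
            return 0;
    }

    /* Illustrative wrapper (hypothetical name): the hook runs after all
     * segments have been copied in, so everything the relocation code
     * needs is already in place at load time. */
    static int load_segments_then_prepare(struct kimage *image)
    {
            unsigned long i;
            int ret;

            for (i = 0; i < image->nr_segments; i++) {
                    ret = kimage_load_segment(image, &image->segment[i]);
                    if (ret)
                            return ret;
            }

            /* With this patch, the arm64 implementation copies
             * arm64_relocate_new_kernel into the control page here and
             * records its physical address in image->arch.kern_reloc. */
            return machine_kexec_post_load(image);
    }

The benefit stated in the commit message follows directly: machine_kexec() no longer has to populate the control page while the machine is being torn down.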
commit 4c3c31230c
parent 7018d467ff
arch/arm64/include/asm/kexec.h:

@@ -95,6 +95,7 @@ static inline void crash_post_resume(void) {}
 struct kimage_arch {
        void *dtb;
        phys_addr_t dtb_mem;
+       phys_addr_t kern_reloc;
        /* Core ELF header buffer */
        void *elf_headers;
        unsigned long elf_headers_mem;
arch/arm64/kernel/machine_kexec.c:

@@ -42,6 +42,7 @@ static void _kexec_image_info(const char *func, int line,
        pr_debug("    start:       %lx\n", kimage->start);
        pr_debug("    head:        %lx\n", kimage->head);
        pr_debug("    nr_segments: %lu\n", kimage->nr_segments);
+       pr_debug("    kern_reloc: %pa\n", &kimage->arch.kern_reloc);

        for (i = 0; i < kimage->nr_segments; i++) {
                pr_debug("      segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
@@ -58,6 +59,22 @@ void machine_kexec_cleanup(struct kimage *kimage)
        /* Empty routine needed to avoid build errors. */
 }

+int machine_kexec_post_load(struct kimage *kimage)
+{
+       void *reloc_code = page_to_virt(kimage->control_code_page);
+
+       memcpy(reloc_code, arm64_relocate_new_kernel,
+              arm64_relocate_new_kernel_size);
+       kimage->arch.kern_reloc = __pa(reloc_code);
+
+       /* Flush the reloc_code in preparation for its execution. */
+       __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
+       flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
+                          arm64_relocate_new_kernel_size);
+
+       return 0;
+}
+
 /**
  * machine_kexec_prepare - Prepare for a kexec reboot.
  *
@@ -143,8 +160,6 @@ static void kexec_segment_flush(const struct kimage *kimage)
  */
 void machine_kexec(struct kimage *kimage)
 {
-       phys_addr_t reboot_code_buffer_phys;
-       void *reboot_code_buffer;
        bool in_kexec_crash = (kimage == kexec_crash_image);
        bool stuck_cpus = cpus_are_stuck_in_kernel();

@@ -155,31 +170,8 @@ void machine_kexec(struct kimage *kimage)
        WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
                "Some CPUs may be stale, kdump will be unreliable.\n");

-       reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
-       reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
-
        kexec_image_info(kimage);

-       /*
-        * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
-        * after the kernel is shut down.
-        */
-       memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
-              arm64_relocate_new_kernel_size);
-
-       /* Flush the reboot_code_buffer in preparation for its execution. */
-       __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
-
-       /*
-        * Although we've killed off the secondary CPUs, we don't update
-        * the online mask if we're handling a crash kernel and consequently
-        * need to avoid flush_icache_range(), which will attempt to IPI
-        * the offline CPUs. Therefore, we must use the __* variant here.
-        */
-       __flush_icache_range((uintptr_t)reboot_code_buffer,
-                            (uintptr_t)reboot_code_buffer +
-                            arm64_relocate_new_kernel_size);
-
        /* Flush the kimage list and its buffers. */
        kexec_list_flush(kimage);

@@ -193,7 +185,7 @@ void machine_kexec(struct kimage *kimage)

        /*
         * cpu_soft_restart will shutdown the MMU, disable data caches, then
-        * transfer control to the reboot_code_buffer which contains a copy of
+        * transfer control to the kern_reloc which contains a copy of
         * the arm64_relocate_new_kernel routine.  arm64_relocate_new_kernel
         * uses physical addressing to relocate the new image to its final
         * position and transfers control to the image entry point when the
@@ -203,7 +195,7 @@ void machine_kexec(struct kimage *kimage)
         * userspace (kexec-tools).
         * In kexec_file case, the kernel starts directly without purgatory.
         */
-       cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
+       cpu_soft_restart(kimage->arch.kern_reloc, kimage->head, kimage->start,
                         kimage->arch.dtb_mem);

        BUG(); /* Should never get here. */
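One design point worth noting (my reading of the diff, not stated in the patch text): because the relocation stub is now staged at load time, the copy must be visible both to instruction fetches and to non-cacheable accesses once the MMU and caches are off at reboot; that is why the new machine_kexec_post_load() pairs __flush_dcache_area() (clean/invalidate to the point of coherency) with flush_icache_range(). A minimal sketch of that staging pattern, with a hypothetical helper name:

    /* Hypothetical helper illustrating the staging pattern used above:
     * copy the relocation stub, then make it visible to the D-side (to
     * the point of coherency, for MMU/caches-off execution) and the
     * I-side (so no stale instruction lines remain). */
    static void stage_reloc_code(void *dst, const void *src, size_t len)
    {
            memcpy(dst, src, len);
            __flush_dcache_area(dst, len);
            flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
    }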