mirror of
https://github.com/torvalds/linux.git
synced 2024-11-14 16:12:02 +00:00
3e10585335
- Support for userspace to emulate Xen hypercalls - Raise the maximum number of user memslots - Scalability improvements for the new MMU. Instead of the complex "fast page fault" logic that is used in mmu.c, tdp_mmu.c uses an rwlock so that page faults are concurrent, but the code that can run against page faults is limited. Right now only page faults take the lock for reading; in the future this will be extended to some cases of page table destruction. I hope to switch the default MMU around 5.12-rc3 (some testing was delayed due to Chinese New Year). - Cleanups for MAXPHYADDR checks - Use static calls for vendor-specific callbacks - On AMD, use VMLOAD/VMSAVE to save and restore host state - Stop using deprecated jump label APIs - Workaround for AMD erratum that made nested virtualization unreliable - Support for LBR emulation in the guest - Support for communicating bus lock vmexits to userspace - Add support for SEV attestation command - Miscellaneous cleanups PPC: - Support for second data watchpoint on POWER10 - Remove some complex workarounds for buggy early versions of POWER9 - Guest entry/exit fixes ARM64 - Make the nVHE EL2 object relocatable - Cleanups for concurrent translation faults hitting the same page - Support for the standard TRNG hypervisor call - A bunch of small PMU/Debug fixes - Simplification of the early init hypercall handling Non-KVM changes (with acks): - Detection of contended rwlocks (implemented only for qrwlocks, because KVM only needs it for x86) - Allow __DISABLE_EXPORTS from assembly code - Provide a saner follow_pfn replacements for modules -----BEGIN PGP SIGNATURE----- iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmApSRgUHHBib256aW5p QHJlZGhhdC5jb20ACgkQv/vSX3jHroOc7wf9FnlinKoTFaSk7oeuuhF/CoCVwSFs Z9+A2sNI99tWHQxFR6dyDkEFeQoXnqSxfLHtUVIdH/JnTg0FkEvFz3NK+0PzY1PF PnGNbSoyhP58mSBG4gbBAxdF3ZJZMB8GBgYPeR62PvMX2dYbcHqVBNhlf6W4MQK4 5mAUuAnbf19O5N267sND+sIg3wwJYwOZpRZB7PlwvfKAGKf18gdBz5dQ/6Ej+apf 
P7GODZITjqM5Iho7SDm/sYJlZprFZT81KqffwJQHWFMEcxFgwzrnYPx7J3gFwRTR eeh9E61eCBDyCTPpHROLuNTVBqrAioCqXLdKOtO5gKvZI3zmomvAsZ8uXQ== =uFZU -----END PGP SIGNATURE----- Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm Pull KVM updates from Paolo Bonzini: "x86: - Support for userspace to emulate Xen hypercalls - Raise the maximum number of user memslots - Scalability improvements for the new MMU. Instead of the complex "fast page fault" logic that is used in mmu.c, tdp_mmu.c uses an rwlock so that page faults are concurrent, but the code that can run against page faults is limited. Right now only page faults take the lock for reading; in the future this will be extended to some cases of page table destruction. I hope to switch the default MMU around 5.12-rc3 (some testing was delayed due to Chinese New Year). - Cleanups for MAXPHYADDR checks - Use static calls for vendor-specific callbacks - On AMD, use VMLOAD/VMSAVE to save and restore host state - Stop using deprecated jump label APIs - Workaround for AMD erratum that made nested virtualization unreliable - Support for LBR emulation in the guest - Support for communicating bus lock vmexits to userspace - Add support for SEV attestation command - Miscellaneous cleanups PPC: - Support for second data watchpoint on POWER10 - Remove some complex workarounds for buggy early versions of POWER9 - Guest entry/exit fixes ARM64: - Make the nVHE EL2 object relocatable - Cleanups for concurrent translation faults hitting the same page - Support for the standard TRNG hypervisor call - A bunch of small PMU/Debug fixes - Simplification of the early init hypercall handling Non-KVM changes (with acks): - Detection of contended rwlocks (implemented only for qrwlocks, because KVM only needs it for x86) - Allow __DISABLE_EXPORTS from assembly code - Provide a saner follow_pfn replacements for modules" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (192 commits) KVM: x86/xen: Explicitly pad struct 
compat_vcpu_info to 64 bytes KVM: selftests: Don't bother mapping GVA for Xen shinfo test KVM: selftests: Fix hex vs. decimal snafu in Xen test KVM: selftests: Fix size of memslots created by Xen tests KVM: selftests: Ignore recently added Xen tests' build output KVM: selftests: Add missing header file needed by xAPIC IPI tests KVM: selftests: Add operand to vmsave/vmload/vmrun in svm.c KVM: SVM: Make symbol 'svm_gp_erratum_intercept' static locking/arch: Move qrwlock.h include after qspinlock.h KVM: PPC: Book3S HV: Fix host radix SLB optimisation with hash guests KVM: PPC: Book3S HV: Ensure radix guest has no SLB entries KVM: PPC: Don't always report hash MMU capability for P9 < DD2.2 KVM: PPC: Book3S HV: Save and restore FSCR in the P9 path KVM: PPC: remove unneeded semicolon KVM: PPC: Book3S HV: Use POWER9 SLBIA IH=6 variant to clear SLB KVM: PPC: Book3S HV: No need to clear radix host SLB before loading HPT guest KVM: PPC: Book3S HV: Fix radix guest SLB side channel KVM: PPC: Book3S HV: Remove support for running HPT guest on RPT host without mixed mode support KVM: PPC: Book3S HV: Introduce new capability for 2nd DAWR KVM: PPC: Book3S HV: Add infrastructure to support 2nd DAWR ...
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */
/*
 * Configuration consumed by <asm-generic/vmlinux.lds.h>, so both macros
 * must be defined before that header is included below.
 */
/* Alignment of the read-only exception table emitted by RO_DATA(). */
#define RO_EXCEPTION_TABLE_ALIGN	8
/*
 * Keep .exit.* sections at link time instead of discarding them; this
 * file places EXIT_TEXT/EXIT_DATA in explicit output sections below.
 */
#define RUNTIME_DISCARD_EXIT

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/hyp_image.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/page.h>

#include "image.h"

OUTPUT_ARCH(aarch64)
ENTRY(_text)

/* arm64 is 64-bit, so jiffies is simply an alias of jiffies_64. */
jiffies = jiffies_64;
#ifdef CONFIG_KVM
/*
 * Exception table entries for the KVM hyp (EL2) code, collected between
 * __start___kvm_ex_table and __stop___kvm_ex_table bracket symbols.
 */
#define HYPERVISOR_EXTABLE					\
	. = ALIGN(SZ_8);					\
	__start___kvm_ex_table = .;				\
	*(__kvm_ex_table)					\
	__stop___kvm_ex_table = .;

/*
 * Read-only data belonging to the hyp object, including its
 * .data..ro_after_init, bracketed by __hyp_rodata_start/end.
 */
#define HYPERVISOR_DATA_SECTIONS				\
	HYP_SECTION_NAME(.rodata) : {				\
		__hyp_rodata_start = .;				\
		*(HYP_SECTION_NAME(.data..ro_after_init))	\
		*(HYP_SECTION_NAME(.rodata))			\
		__hyp_rodata_end = .;				\
	}

/* Page-aligned per-CPU data for the hyp object. */
#define HYPERVISOR_PERCPU_SECTION				\
	. = ALIGN(PAGE_SIZE);					\
	HYP_SECTION_NAME(.data..percpu) : {			\
		*(HYP_SECTION_NAME(.data..percpu))		\
	}

/*
 * Relocations applied to the relocatable nVHE hyp object, bracketed by
 * __hyp_reloc_begin/end.
 */
#define HYPERVISOR_RELOC_SECTION				\
	.hyp.reloc : ALIGN(4) {					\
		__hyp_reloc_begin = .;				\
		*(.hyp.reloc)					\
		__hyp_reloc_end = .;				\
	}

#else /* CONFIG_KVM */
/* Without KVM there is no hyp code or data: all of these are empty. */
#define HYPERVISOR_EXTABLE
#define HYPERVISOR_DATA_SECTIONS
#define HYPERVISOR_PERCPU_SECTION
#define HYPERVISOR_RELOC_SECTION
#endif
/*
 * Hyp (EL2) text: the idmap'd init code (__hyp_idmap_text_start/end)
 * followed by the rest of the hyp text and its exception table
 * (__hyp_text_start/end). Size-checked by an ASSERT() at end of file.
 */
#define HYPERVISOR_TEXT					\
	/*						\
	 * Align to 4 KB so that			\
	 * a) the HYP vector table is at its minimum	\
	 *    alignment of 2048 bytes			\
	 * b) the HYP init code will not cross a page	\
	 *    boundary if its size does not exceed	\
	 *    4 KB (see related ASSERT() below)		\
	 */						\
	. = ALIGN(SZ_4K);				\
	__hyp_idmap_text_start = .;			\
	*(.hyp.idmap.text)				\
	__hyp_idmap_text_end = .;			\
	__hyp_text_start = .;				\
	*(.hyp.text)					\
	HYPERVISOR_EXTABLE				\
	__hyp_text_end = .;
/*
 * Identity-map text, bracketed by __idmap_text_start/end. An ASSERT()
 * at the end of this file requires it to fit within a single 4 KB page.
 */
#define IDMAP_TEXT					\
	. = ALIGN(SZ_4K);				\
	__idmap_text_start = .;				\
	*(.idmap.text)					\
	__idmap_text_end = .;
#ifdef CONFIG_HIBERNATION
/*
 * Hibernate-exit text, bracketed by __hibernate_exit_text_start/end.
 * An ASSERT() at the end of this file requires it to fit in one 4 KB
 * page without crossing a page boundary.
 */
#define HIBERNATE_TEXT				\
	. = ALIGN(SZ_4K);			\
	__hibernate_exit_text_start = .;	\
	*(.hibernate_exit.text)			\
	__hibernate_exit_text_end = .;
#else
#define HIBERNATE_TEXT
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Entry trampoline text, page-aligned at both ends; an ASSERT() at the
 * end of this file requires it to be exactly PAGE_SIZE in size.
 */
#define TRAMP_TEXT					\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_start = .;			\
	*(.entry.tramp.text)				\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_end = .;
#else
#define TRAMP_TEXT
#endif
/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from _stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. '_stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;

#ifdef CONFIG_EFI
/*
 * Emit at least one byte and pad up to the PE/COFF FileAlignment so
 * that _edata lands on a 0x200-aligned boundary (see comment above).
 */
#define PECOFF_EDATA_PADDING	\
	.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
#define PECOFF_EDATA_PADDING
#endif
SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 */
	DISCARDS
	/* Dynamic-linking metadata is never needed in the kernel image. */
	/DISCARD/ : {
		*(.interp .dynamic)
		*(.dynsym .dynstr .hash .gnu.hash)
	}

	. = KIMAGE_VADDR;

	/* _text marks the very start of the image (checked below). */
	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : ALIGN(SEGMENT_ALIGN) {	/* Real text segment */
		_stext = .;		/* Text and read-only data	*/
			IRQENTRY_TEXT
			SOFTIRQENTRY_TEXT
			ENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			CPUIDLE_TEXT
			LOCK_TEXT
			KPROBES_TEXT
			HYPERVISOR_TEXT
			IDMAP_TEXT
			HIBERNATE_TEXT
			TRAMP_TEXT
			*(.fixup)
			*(.gnu.warning)
		. = ALIGN(16);
		*(.got)			/* Global offset table		*/
	}

	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
	       "Unexpected GOT/PLT entries detected!")

	. = ALIGN(SEGMENT_ALIGN);
	_etext = .;			/* End of text section */

	/* everything from this point to __init_begin will be marked RO NX */
	RO_DATA(PAGE_SIZE)

	/* Reserve space for the statically-allocated page directories. */
	idmap_pg_dir = .;
	. += IDMAP_DIR_SIZE;
	idmap_pg_end = .;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_pg_dir = .;
	. += PAGE_SIZE;
#endif

	reserved_pg_dir = .;
	. += PAGE_SIZE;

	swapper_pg_dir = .;
	. += PAGE_SIZE;

	. = ALIGN(SEGMENT_ALIGN);
	__init_begin = .;
	__inittext_begin = .;

	INIT_TEXT_SECTION(8)

	/* .exit.text is kept at link time (see RUNTIME_DISCARD_EXIT). */
	__exittext_begin = .;
	.exit.text : {
		EXIT_TEXT
	}
	__exittext_end = .;

	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	. = ALIGN(SEGMENT_ALIGN);
	__inittext_end = .;
	__initdata_begin = .;

	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
		*(.init.altinstructions .init.bss)	/* from the EFI stub */
	}
	.exit.data : {
		EXIT_DATA
	}

	PERCPU_SECTION(L1_CACHE_BYTES)
	HYPERVISOR_PERCPU_SECTION

	HYPERVISOR_RELOC_SECTION

	/* Dynamic relocations; offset/size are exported for the boot code. */
	.rela.dyn : ALIGN(8) {
		*(.rela .rela*)
	}

	__rela_offset	= ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
	__rela_size	= SIZEOF(.rela.dyn);

#ifdef CONFIG_RELR
	/* Compressed RELR relocations, analogous to .rela.dyn above. */
	.relr.dyn : ALIGN(8) {
		*(.relr.dyn)
	}

	__relr_offset	= ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
	__relr_size	= SIZEOF(.relr.dyn);
#endif

	. = ALIGN(SEGMENT_ALIGN);
	__initdata_end = .;
	__init_end = .;

	_data = .;
	_sdata = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)

	HYPERVISOR_DATA_SECTIONS

	/*
	 * Data written with the MMU off but read with the MMU on requires
	 * cache lines to be invalidated, discarding up to a Cache Writeback
	 * Granule (CWG) of data from the cache. Keep the section that
	 * requires this type of maintenance to be in its own Cache Writeback
	 * Granule (CWG) area so the cache maintenance operations don't
	 * interfere with adjacent data.
	 */
	.mmuoff.data.write : ALIGN(SZ_2K) {
		__mmuoff_data_start = .;
		*(.mmuoff.data.write)
	}
	. = ALIGN(SZ_2K);
	.mmuoff.data.read : {
		*(.mmuoff.data.read)
		__mmuoff_data_end = .;
	}

	PECOFF_EDATA_PADDING
	__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
	_edata = .;

	BSS_SECTION(0, 0, 0)

	/* Reserve space for the initial page tables. */
	. = ALIGN(PAGE_SIZE);
	init_pg_dir = .;
	. += INIT_DIR_SIZE;
	init_pg_end = .;

	. = ALIGN(SEGMENT_ALIGN);
	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	HEAD_SYMBOLS

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.plt : {
		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.data.rel.ro : { *(.data.rel.ro) }
	ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")
}
#include "image-vars.h"

/*
 * Link-time sanity checks on the sizes and placement of the special
 * text regions and page-table directories laid out above.
 */

/*
 * The HYP init code and ID map text can't be longer than a page each,
 * and should not cross a page boundary.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"HYP init code too big or misaligned")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
	<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
	"Entry trampoline text too big")
#endif
/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned")

ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET,
       "RESERVED_SWAPPER_OFFSET is wrong!")

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
       "TRAMP_SWAPPER_OFFSET is wrong!")
#endif