mirror of https://github.com/torvalds/linux.git (synced 2024-11-17 01:22:07 +00:00)

commit a5d90c923b
Alex reported hitting the following BUG after the EFI 1:1 virtual
mapping work was merged:

  kernel BUG at arch/x86/mm/init_64.c:351!
  invalid opcode: 0000 [#1] SMP
  Call Trace:
   [<ffffffff818aa71d>] init_extra_mapping_uc+0x13/0x15
   [<ffffffff818a5e20>] uv_system_init+0x22b/0x124b
   [<ffffffff8108b886>] ? clockevents_register_device+0x138/0x13d
   [<ffffffff81028dbb>] ? setup_APIC_timer+0xc5/0xc7
   [<ffffffff8108b620>] ? clockevent_delta2ns+0xb/0xd
   [<ffffffff818a3a92>] ? setup_boot_APIC_clock+0x4a8/0x4b7
   [<ffffffff8153d955>] ? printk+0x72/0x74
   [<ffffffff818a1757>] native_smp_prepare_cpus+0x389/0x3d6
   [<ffffffff818957bc>] kernel_init_freeable+0xb7/0x1fb
   [<ffffffff81535530>] ? rest_init+0x74/0x74
   [<ffffffff81535539>] kernel_init+0x9/0xff
   [<ffffffff81541dfc>] ret_from_fork+0x7c/0xb0
   [<ffffffff81535530>] ? rest_init+0x74/0x74

Getting this thing to work with the new mapping scheme would need more
work, so automatically switch to the old memmap layout for SGI UV.

Acked-by: Russ Anderson <rja@sgi.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
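The fix lands in efi_apply_memmap_quirks() (declared in the header below). A
minimal sketch of the quirk, assuming the is_uv_system() helper from
<asm/uv/uv.h> and the x86_efi_facility bitmap declared further down (not the
verbatim commit):

	void __init efi_apply_memmap_quirks(void)
	{
		/*
		 * SGI UV cannot cope with the new EFI virtual mapping
		 * scheme yet, so fall back to the old memmap layout,
		 * as if the user had booted with efi=old_map.
		 */
		if (is_uv_system())
			set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
	}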
173 lines
5.9 KiB
C
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP		EFI_ARCH_1
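/*
 * Illustrative sketch, not part of the original file: on x86-64 the
 * region-mapping path consults this flag and falls back to the legacy
 * scheme when it is set, roughly as in this era's efi_map_region() in
 * arch/x86/platform/efi/efi_64.c:
 *
 *	if (efi_enabled(EFI_OLD_MEMMAP))
 *		return old_map_region(md);
 */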

#ifdef CONFIG_X86_32

#define EFI_LOADER_SIGNATURE	"EL32"

extern unsigned long asmlinkage efi_call_phys(void *, ...);

#define efi_call_phys0(f)		efi_call_phys(f)
#define efi_call_phys1(f, a1)		efi_call_phys(f, a1)
#define efi_call_phys2(f, a1, a2)	efi_call_phys(f, a1, a2)
#define efi_call_phys3(f, a1, a2, a3)	efi_call_phys(f, a1, a2, a3)
#define efi_call_phys4(f, a1, a2, a3, a4)	\
	efi_call_phys(f, a1, a2, a3, a4)
#define efi_call_phys5(f, a1, a2, a3, a4, a5)	\
	efi_call_phys(f, a1, a2, a3, a4, a5)
#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6)	\
	efi_call_phys(f, a1, a2, a3, a4, a5, a6)

/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */
#define efi_call_virt(f, args...) \
	((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)

#define efi_call_virt0(f)		efi_call_virt(f)
#define efi_call_virt1(f, a1)		efi_call_virt(f, a1)
#define efi_call_virt2(f, a1, a2)	efi_call_virt(f, a1, a2)
#define efi_call_virt3(f, a1, a2, a3)	efi_call_virt(f, a1, a2, a3)
#define efi_call_virt4(f, a1, a2, a3, a4)	\
	efi_call_virt(f, a1, a2, a3, a4)
#define efi_call_virt5(f, a1, a2, a3, a4, a5)	\
	efi_call_virt(f, a1, a2, a3, a4, a5)
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
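/*
 * Illustrative note, not part of the original file: 32-bit kernels are
 * built with -mregparm=3, so GCC normally passes the first three
 * arguments in registers. The regparm(0) cast above forces everything
 * back onto the stack, which is what the firmware's cdecl-style entry
 * points expect. For example,
 *
 *	status = efi_call_virt2(get_time, tm, tc);
 *
 * calls through efi.systab->runtime->get_time with tm and tc pushed on
 * the stack.
 */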

#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern u64 efi_call0(void *fp);
extern u64 efi_call1(void *fp, u64 arg1);
extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
extern u64 efi_call3(void *fp, u64 arg1, u64 arg2, u64 arg3);
extern u64 efi_call4(void *fp, u64 arg1, u64 arg2, u64 arg3, u64 arg4);
extern u64 efi_call5(void *fp, u64 arg1, u64 arg2, u64 arg3,
		     u64 arg4, u64 arg5);
extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
		     u64 arg4, u64 arg5, u64 arg6);

#define efi_call_phys0(f)			\
	efi_call0((f))
#define efi_call_phys1(f, a1)			\
	efi_call1((f), (u64)(a1))
#define efi_call_phys2(f, a1, a2)		\
	efi_call2((f), (u64)(a1), (u64)(a2))
#define efi_call_phys3(f, a1, a2, a3)		\
	efi_call3((f), (u64)(a1), (u64)(a2), (u64)(a3))
#define efi_call_phys4(f, a1, a2, a3, a4)	\
	efi_call4((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
		  (u64)(a4))
#define efi_call_phys5(f, a1, a2, a3, a4, a5)	\
	efi_call5((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
		  (u64)(a4), (u64)(a5))
#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6)	\
	efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
		  (u64)(a4), (u64)(a5), (u64)(a6))
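/*
 * Illustrative usage, not part of the original file, modeled on this
 * era's phys_efi_set_virtual_address_map() in arch/x86/platform/efi/efi.c:
 * physical-mode calls are bracketed by the prelog/epilog helpers declared
 * later in this header:
 *
 *	efi_call_phys_prelog();
 *	status = efi_call_phys4(efi_phys.set_virtual_address_map,
 *				memory_map_size, descriptor_size,
 *				descriptor_version, virtual_map);
 *	efi_call_phys_epilog();
 */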

#define _efi_call_virtX(x, f, ...)					\
({									\
	efi_status_t __s;						\
									\
	efi_sync_low_kernel_mappings();					\
	preempt_disable();						\
	__s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__);	\
	preempt_enable();						\
	__s;								\
})

#define efi_call_virt0(f)				\
	_efi_call_virtX(0, f)
#define efi_call_virt1(f, a1)				\
	_efi_call_virtX(1, f, (u64)(a1))
#define efi_call_virt2(f, a1, a2)			\
	_efi_call_virtX(2, f, (u64)(a1), (u64)(a2))
#define efi_call_virt3(f, a1, a2, a3)			\
	_efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3))
#define efi_call_virt4(f, a1, a2, a3, a4)		\
	_efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4))
#define efi_call_virt5(f, a1, a2, a3, a4, a5)		\
	_efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5))
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
	_efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
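/*
 * Illustrative usage, not part of the original file: each runtime service
 * gets a thin wrapper built on these macros, mirroring this era's
 * wrappers in arch/x86/platform/efi/efi.c, e.g.:
 *
 *	static efi_status_t virt_efi_get_time(efi_time_t *tm,
 *					      efi_time_cap_t *tc)
 *	{
 *		return efi_call_virt2(get_time, tm, tc);
 *	}
 */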

extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
				 u32 type, u64 attribute);

#endif /* CONFIG_X86_32 */

extern int add_efi_memmap;
extern unsigned long x86_efi_facility;
extern struct efi_scratch efi_scratch;
extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int efi_memblock_x86_reserve_range(void);
extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern void efi_setup_page_tables(void);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_mkexec(void);
extern void __init efi_apply_memmap_quirks(void);

struct efi_setup_data {
	u64 fw_vendor;
	u64 runtime;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};
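/*
 * Illustrative sketch, an assumption rather than the verbatim source:
 * kexec passes this structure to the new kernel via a setup_data entry,
 * and parse_efi_setup() only needs to record where it lives so that the
 * fixed-mapping path can reuse the first kernel's firmware addresses:
 *
 *	void __init parse_efi_setup(u64 phys_addr, u32 data_len)
 *	{
 *		efi_setup = phys_addr + sizeof(struct setup_data);
 *	}
 */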

extern u64 efi_setup;

#ifdef CONFIG_EFI

static inline bool efi_is_native(void)
{
	return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}

extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);
#else
/*
 * If EFI is not configured, have the EFI calls return -ENOSYS.
 */
#define efi_call0(_f) (-ENOSYS)
#define efi_call1(_f, _a1) (-ENOSYS)
#define efi_call2(_f, _a1, _a2) (-ENOSYS)
#define efi_call3(_f, _a1, _a2, _a3) (-ENOSYS)
#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS)
#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS)
#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS)
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */