x86: fix EFI mapping
This patch updates the EFI runtime memory mapping code, making the EFI runtime areas explicitly executable.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 1c083eb2cb
parent f56d005d30
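For orientation before the hunks: after this patch, runtime_code_page_mkexec() simply walks the EFI memory map and marks each runtime-services code region executable, instead of being compiled only for CONFIG_X86_64/CONFIG_X86_PAE and checking against max_pfn_mapped. A condensed sketch of the resulting function, assembled from the hunks below (the return after the NX check falls between the two hunks and is inferred, not shown in the diff):

static void __init runtime_code_page_mkexec(void)
{
	efi_memory_desc_t *md;
	void *p;

	/* Nothing to do if NX is not supported (inferred body, see note above). */
	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make the EFI runtime service code areas executable. */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;

		if (md->type != EFI_RUNTIME_SERVICES_CODE)
			continue;

		set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
	}
}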
@@ -379,11 +379,9 @@ void __init efi_init(void)
 #endif
 }
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static void __init runtime_code_page_mkexec(void)
 {
 	efi_memory_desc_t *md;
-	unsigned long end;
 	void *p;
 
 	if (!(__supported_pte_mask & _PAGE_NX))
@@ -392,18 +390,13 @@ static void __init runtime_code_page_mkexec(void)
 	/* Make EFI runtime service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-		if (md->type == EFI_RUNTIME_SERVICES_CODE &&
-		    (end >> PAGE_SHIFT) <= max_pfn_mapped) {
-			set_memory_x(md->virt_addr, md->num_pages);
-			set_memory_uc(md->virt_addr, md->num_pages);
-		}
+
+		if (md->type != EFI_RUNTIME_SERVICES_CODE)
+			continue;
+
+		set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
 	}
-	__flush_tlb_all();
 }
-#else
-static inline void __init runtime_code_page_mkexec(void) { }
-#endif
 
 /*
  * This function will switch the EFI runtime services to virtual mode.
@@ -417,30 +410,40 @@ void __init efi_enter_virtual_mode(void)
 {
 	efi_memory_desc_t *md;
 	efi_status_t status;
-	unsigned long end;
-	void *p;
+	unsigned long size;
+	u64 end, systab;
+	void *p, *va;
 
 	efi.systab = NULL;
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
 			continue;
-		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-		if ((md->attribute & EFI_MEMORY_WB) &&
-		    ((end >> PAGE_SHIFT) <= max_pfn_mapped))
-			md->virt_addr = (unsigned long)__va(md->phys_addr);
+
+		size = md->num_pages << EFI_PAGE_SHIFT;
+		end = md->phys_addr + size;
+
+		if ((end >> PAGE_SHIFT) <= max_pfn_mapped)
+			va = __va(md->phys_addr);
 		else
-			md->virt_addr = (unsigned long)
-				efi_ioremap(md->phys_addr,
-					    md->num_pages << EFI_PAGE_SHIFT);
-		if (!md->virt_addr)
+			va = efi_ioremap(md->phys_addr, size);
+
+		if (md->attribute & EFI_MEMORY_WB)
+			set_memory_uc(md->virt_addr, size);
+
+		md->virt_addr = (u64) (unsigned long) va;
+
+		if (!va) {
 			printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
 			       (unsigned long long)md->phys_addr);
-		if ((md->phys_addr <= (unsigned long)efi_phys.systab) &&
-		    ((unsigned long)efi_phys.systab < end))
-			efi.systab = (efi_system_table_t *)(unsigned long)
-				(md->virt_addr - md->phys_addr +
-				 (unsigned long)efi_phys.systab);
+			continue;
+		}
+
+		systab = (u64) (unsigned long) efi_phys.systab;
+		if (md->phys_addr <= systab && systab < end) {
+			systab += md->virt_addr - md->phys_addr;
+			efi.systab = (efi_system_table_t *) (unsigned long) systab;
+		}
 	}
 
 	BUG_ON(!efi.systab);
@@ -54,10 +54,10 @@ static void __init early_mapping_set_exec(unsigned long start,
 		else
 			set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
 					    __supported_pte_mask));
-		if (level == 4)
-			start = (start + PMD_SIZE) & PMD_MASK;
-		else
+		if (level == PG_LEVEL_4K)
 			start = (start + PAGE_SIZE) & PAGE_MASK;
+		else
+			start = (start + PMD_SIZE) & PMD_MASK;
 	}
 }
 
@@ -109,23 +109,23 @@ void __init efi_reserve_bootmem(void)
 		       memmap.nr_map * memmap.desc_size);
 }
 
-void __iomem * __init efi_ioremap(unsigned long offset,
-				  unsigned long size)
+void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
 	static unsigned pages_mapped;
-	unsigned long last_addr;
 	unsigned i, pages;
 
-	last_addr = offset + size - 1;
-	offset &= PAGE_MASK;
-	pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
+	/* phys_addr and size must be page aligned */
+	if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK))
+		return NULL;
+
+	pages = size >> PAGE_SHIFT;
 	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
 		return NULL;
 
 	for (i = 0; i < pages; i++) {
 		__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-			     offset, PAGE_KERNEL_EXEC_NOCACHE);
-		offset += PAGE_SIZE;
+			     phys_addr, PAGE_KERNEL);
+		phys_addr += PAGE_SIZE;
 		pages_mapped++;
 	}
 
@@ -33,7 +33,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
 	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
-#define efi_ioremap(addr, size)		ioremap(addr, size)
+#define efi_ioremap(addr, size)		ioremap_cache(addr, size)
 
 #else /* !CONFIG_X86_32 */
 
@@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
 	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern void *efi_ioremap(unsigned long offset, unsigned long size);
+extern void *efi_ioremap(unsigned long addr, unsigned long size);
 
 #endif /* CONFIG_X86_32 */
 