linux/arch/arm64/kernel/vdso.c
Vincenzo Frascino c7aa2d7102 arm64: vdso: Refactor vDSO code
Most of the code for initializing the vDSOs in arm64 and compat will be
shared, hence the current code is refactored to avoid duplication and to
simplify maintenance.

No functional change.

Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Shijith Thotton <sthotton@marvell.com>
Tested-by: Andre Przywara <andre.przywara@arm.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-mips@vger.kernel.org
Cc: linux-kselftest@vger.kernel.org
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Mark Salyzyn <salyzyn@android.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Huw Davies <huw@codeweavers.com>
Link: https://lkml.kernel.org/r/20190621095252.32307-12-vincenzo.frascino@arm.com
2019-06-22 21:21:08 +02:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
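
/*
 * vdso_start/vdso_end delimit the vDSO image that is built from the
 * vdso/ subdirectory and embedded into the kernel image.
 */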
extern char vdso_start[], vdso_end[];

/* vdso_lookup arch_index */
enum arch_vdso_type {
        ARM64_VDSO = 0,
};
#define VDSO_TYPES              (ARM64_VDSO + 1)

struct __vdso_abi {
        const char *name;
        const char *vdso_code_start;
        const char *vdso_code_end;
        unsigned long vdso_pages;
        /* Data Mapping */
        struct vm_special_mapping *dm;
        /* Code Mapping */
        struct vm_special_mapping *cm;
};

static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
        {
                .name = "vdso",
                .vdso_code_start = vdso_start,
                .vdso_code_end = vdso_end,
        },
};

/*
 * The vDSO data page.
 */
static union {
        struct vdso_data        data[CS_BASES];
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
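
/*
 * mremap() hook for the [vdso] mapping: userspace may move the mapping
 * but not resize it; track the new base in the mm context.
 */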
static int __vdso_remap(enum arch_vdso_type arch_index,
                        const struct vm_special_mapping *sm,
                        struct vm_area_struct *new_vma)
{
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
        unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
                                  vdso_lookup[arch_index].vdso_code_start;

        if (vdso_size != new_size)
                return -EINVAL;

        current->mm->context.vdso = (void *)new_vma->vm_start;

        return 0;
}
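
/*
 * One-time initialisation: validate the embedded vDSO image and
 * populate the page arrays used by the data and code mappings.
 */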
static int __vdso_init(enum arch_vdso_type arch_index)
{
        int i;
        struct page **vdso_pagelist;
        unsigned long pfn;

        if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
                pr_err("vDSO is not a valid ELF object!\n");
                return -EINVAL;
        }

        vdso_lookup[arch_index].vdso_pages = (
                vdso_lookup[arch_index].vdso_code_end -
                vdso_lookup[arch_index].vdso_code_start) >>
                PAGE_SHIFT;

        /* Allocate the vDSO pagelist, plus a page for the data. */
        vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
                                sizeof(struct page *),
                                GFP_KERNEL);
        if (vdso_pagelist == NULL)
                return -ENOMEM;

        /* Grab the vDSO data page. */
        vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

        /* Grab the vDSO code pages. */
        pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);

        for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
                vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

        vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
        vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];

        return 0;
}
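
/*
 * Map the vDSO into a process: one read-only data page ([vvar])
 * immediately followed by the vDSO code pages ([vdso]).
 */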
static int __setup_additional_pages(enum arch_vdso_type arch_index,
                                    struct mm_struct *mm,
                                    struct linux_binprm *bprm,
                                    int uses_interp)
{
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        void *ret;

        vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + PAGE_SIZE;

        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }

        ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
                                       VM_READ|VM_MAYREAD,
                                       vdso_lookup[arch_index].dm);
        if (IS_ERR(ret))
                goto up_fail;

        vdso_base += PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       vdso_lookup[arch_index].cm);
        if (IS_ERR(ret))
                goto up_fail;

        return 0;

up_fail:
        mm->context.vdso = NULL;
        return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
/*
 * aarch32_vdso_pages:
 * 0 - kuser helpers
 * 1 - sigreturn code
 */
#define C_VECTORS       0
#define C_SIGPAGE       1
#define C_PAGES         (C_SIGPAGE + 1)
static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
        {
                .name   = "[vectors]", /* ABI */
                .pages  = &aarch32_vdso_pages[C_VECTORS],
        },
        {
                .name   = "[sigpage]", /* ABI */
                .pages  = &aarch32_vdso_pages[C_SIGPAGE],
        },
};
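
/*
 * The kuser helpers are copied to the top of the [vectors] page
 * (mapped at AARCH32_VECTORS_BASE), so that they end up at the fixed
 * addresses the kuser ABI guarantees.
 */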
static int aarch32_alloc_kuser_vdso_page(void)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
        unsigned long vdso_page;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        vdso_page = get_zeroed_page(GFP_ATOMIC);
        if (!vdso_page)
                return -ENOMEM;

        memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
               kuser_sz);
        aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
        flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
        return 0;
}

static int __init aarch32_alloc_vdso_pages(void)
{
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        unsigned long sigpage;
        int ret;

        sigpage = get_zeroed_page(GFP_ATOMIC);
        if (!sigpage)
                return -ENOMEM;

        memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
        aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
        flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);

        ret = aarch32_alloc_kuser_vdso_page();
        if (ret)
                free_page(sigpage);

        return ret;
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
        void *ret;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        /*
         * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
         * not safe to CoW the page containing the CPU exception vectors.
         */
        ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       &aarch32_vdso_spec[C_VECTORS]);

        return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
        unsigned long addr;
        void *ret;

        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = ERR_PTR(addr);
                goto out;
        }

        /*
         * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
         * set breakpoints.
         */
        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC | VM_MAYREAD |
                                       VM_MAYWRITE | VM_MAYEXEC,
                                       &aarch32_vdso_spec[C_SIGPAGE]);
        if (IS_ERR(ret))
                goto out;

        mm->context.vdso = (void *)addr;

out:
        return PTR_ERR_OR_ZERO(ret);
}

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        ret = aarch32_kuser_helpers_setup(mm);
        if (ret)
                goto out;

        ret = aarch32_sigreturn_setup(mm);

out:
        up_write(&mm->mmap_sem);
        return ret;
}
#endif /* CONFIG_COMPAT */

static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *new_vma)
{
        return __vdso_remap(ARM64_VDSO, sm, new_vma);
}

/*
 * aarch64_vdso_pages:
 * 0 - vvar
 * 1 - vdso
 */
#define A_VVAR          0
#define A_VDSO          1
#define A_PAGES         (A_VDSO + 1)
static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
        {
                .name   = "[vvar]",
        },
        {
                .name   = "[vdso]",
                .mremap = vdso_mremap,
        },
};

static int __init vdso_init(void)
{
        vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
        vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];

        return __vdso_init(ARM64_VDSO);
}
arch_initcall(vdso_init);
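
/*
 * Called by the ELF loader when a new process image is set up, to map
 * the vDSO into the process address space.
 */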
int arch_setup_additional_pages(struct linux_binprm *bprm,
                                int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        ret = __setup_additional_pages(ARM64_VDSO,
                                       mm,
                                       bprm,
                                       uses_interp);

        up_write(&mm->mmap_sem);

        return ret;
}