linux/arch/arm64/mm/physaddr.c
Laura Abbott ec6d06efb0 arm64: Add support for CONFIG_DEBUG_VIRTUAL
x86 has an option CONFIG_DEBUG_VIRTUAL to do additional checks
on virt_to_phys calls. The goal is to immediately catch users who are
calling virt_to_phys on non-linear addresses. This includes callers
using virt_to_phys on image addresses instead of __pa_symbol. As features
such as CONFIG_VMAP_STACK get enabled for arm64, this becomes increasingly
important. Add checks to catch bad virt_to_phys usage.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2017-01-12 15:05:39 +00:00

#include <linux/bug.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mmdebug.h>
#include <linux/mm.h>

#include <asm/memory.h>

phys_addr_t __virt_to_phys(unsigned long x)
{
	WARN(!__is_lm_address(x),
	     "virt_to_phys used for non-linear address: %pK (%pS)\n",
	     (void *)x,
	     (void *)x);

	return __virt_to_phys_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);

phys_addr_t __phys_addr_symbol(unsigned long x)
{
	/*
	 * This is bounds checking against the kernel image only.
	 * __pa_symbol should only be used on kernel symbol addresses.
	 */
	VIRTUAL_BUG_ON(x < (unsigned long) KERNEL_START ||
		       x > (unsigned long) KERNEL_END);

	return __pa_symbol_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);
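
For illustration, a minimal sketch of the kind of misuse these checks are
meant to catch; example_data and debug_virtual_example are hypothetical
names, not part of this patch. Passing the address of a kernel image symbol
to virt_to_phys() trips the WARN in __virt_to_phys() above, while
__pa_symbol() remains the correct translation for image addresses.

#include <linux/mm.h>
#include <linux/printk.h>
#include <asm/memory.h>

/* Hypothetical example, not part of this patch. */
static int example_data;	/* lives in the kernel image, not the linear map */

static void debug_virtual_example(void)
{
	/*
	 * Wrong: &example_data is an image address, so with
	 * CONFIG_DEBUG_VIRTUAL the WARN in __virt_to_phys() fires.
	 */
	phys_addr_t bad = virt_to_phys(&example_data);

	/* Right: kernel symbol addresses go through __pa_symbol(). */
	phys_addr_t good = __pa_symbol(&example_data);

	pr_info("bad=%pa good=%pa\n", &bad, &good);
}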