mirror of
https://github.com/torvalds/linux.git
synced 2024-11-16 17:12:06 +00:00
f2ca09f381
The set_memory_* functions currently only support module addresses. The addresses are validated using is_module_addr. That function is special though and relies on internal state in the module subsystem to work properly. At the time of module initialization and calling set_memory_*, it's too early for is_module_addr to work properly so it always returns false. Rather than be subject to the whims of the module state, just bounds check against the module virtual address range. Signed-off-by: Laura Abbott <lauraa@codeaurora.org> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
95 lines
2.2 KiB
C
95 lines
2.2 KiB
C
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
 * Bit masks handed to the page-table walk callback: set_mask bits are
 * OR-ed into each PTE, clear_mask bits are removed from it.
 */
struct page_change_data {
	pgprot_t set_mask;	/* protection bits to turn on */
	pgprot_t clear_mask;	/* protection bits to turn off */
};
/*
 * apply_to_page_range() callback: rewrite one PTE, clearing then setting
 * the bits requested in the page_change_data cookie.  Always returns 0 so
 * the walk visits every page in the range.
 */
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *masks = data;
	pte_t updated;

	/* Clear first, then set, so overlapping masks favour set_mask. */
	updated = clear_pte_bit(*ptep, masks->clear_mask);
	updated = set_pte_bit(updated, masks->set_mask);

	set_pte_ext(ptep, updated, 0);
	return 0;
}
/*
 * Apply set_mask/clear_mask to every PTE in
 * [addr, addr + numpages * PAGE_SIZE).
 *
 * Only the module area is accepted; bounds-checking against
 * MODULES_VADDR/MODULES_END replaces is_module_addr(), which relies on
 * module-subsystem state and returns false during early module init.
 * A misaligned addr is rounded down to a page boundary (with a one-shot
 * warning) rather than rejected.
 *
 * Returns 0 on success, -EINVAL if the range is not wholly inside the
 * module area, or the error from apply_to_page_range().
 */
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE*numpages;
	unsigned long end = start + size;
	int ret;
	struct page_change_data data;

	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	if (start < MODULES_VADDR || start >= MODULES_END)
		return -EINVAL;

	/*
	 * Fix: the original re-tested 'start' here instead of 'end', so a
	 * range that begins inside the module area but runs past MODULES_END
	 * was silently accepted.  'end' is exclusive, hence '>' so that a
	 * range ending exactly at MODULES_END remains valid.
	 */
	if (end < MODULES_VADDR || end > MODULES_END)
		return -EINVAL;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	/* Flush even on partial failure: some PTEs may have been rewritten. */
	flush_tlb_kernel_range(start, end);
	return ret;
}
int set_memory_ro(unsigned long addr, int numpages)
|
|
{
|
|
return change_memory_common(addr, numpages,
|
|
__pgprot(L_PTE_RDONLY),
|
|
__pgprot(0));
|
|
}
|
|
|
|
int set_memory_rw(unsigned long addr, int numpages)
|
|
{
|
|
return change_memory_common(addr, numpages,
|
|
__pgprot(0),
|
|
__pgprot(L_PTE_RDONLY));
|
|
}
|
|
|
|
int set_memory_nx(unsigned long addr, int numpages)
|
|
{
|
|
return change_memory_common(addr, numpages,
|
|
__pgprot(L_PTE_XN),
|
|
__pgprot(0));
|
|
}
|
|
|
|
int set_memory_x(unsigned long addr, int numpages)
|
|
{
|
|
return change_memory_common(addr, numpages,
|
|
__pgprot(0),
|
|
__pgprot(L_PTE_XN));
|
|
}
|