/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

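/*
 * Flush the CPU data cache for the given address range, one cache line at
 * a time, using the clflush instruction. The stride is the cache line size
 * reported by the CPU (boot_cpu_data.x86_clflush_size).
 */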
void clflush_cache_range(void *addr, int size)
{
        int i;

        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
                clflush(addr + i);
}

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

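/*
 * Look up the kernel page-table entry that maps @address and report the
 * mapping level via @level: 3 when the address is covered by a large
 * (2M/4M) pmd, 4 when it is mapped by a regular 4k pte. Returns NULL if
 * there is no mapping at any level.
 */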
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        *level = 3;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        *level = 4;

        return pte_offset_kernel(pmd, address);
}

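/*
 * Set a pmd-level entry in init_mm and, on 32-bit configurations where the
 * kernel pmd is not shared (!SHARED_KERNEL_PMD), propagate the same update
 * into every pgd on pgd_list so all page tables stay in sync.
 */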
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (SHARED_KERNEL_PMD)
                return;
        {
                struct page *page;

                for (page = pgd_list; page; page = (struct page *)page->index) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

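/*
 * Split a large (2M/4M) kernel mapping into individual 4k ptes. A fresh pte
 * page is populated so that it reproduces the original large-page
 * protections, then installed in place of the large pmd entry under
 * pgd_lock. Returns 0 on success or -ENOMEM if the pte page cannot be
 * allocated.
 */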
static int split_large_page(pte_t *kpte, unsigned long address)
{
        pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        gfp_t gfp_flags = GFP_KERNEL;
        unsigned long flags;
        unsigned long addr;
        pte_t *pbase, *tmp;
        struct page *base;
        int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
        gfp_flags = GFP_ATOMIC;
#endif
        base = alloc_pages(gfp_flags, 0);
        if (!base)
                return -ENOMEM;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte) {
                WARN_ON_ONCE(1);
                goto out_unlock;
        }

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

        /*
         * Install the new, split up pagetable:
         */
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        if (base)
                __free_pages(base, 0);

        return 0;
}

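/*
 * Change the protection of a single 4k page in the kernel mapping. If the
 * page is currently mapped by a 4k pte (level 4) the new protection is set
 * atomically; if it lies inside a large mapping, the large page is split
 * first and the lookup is retried.
 */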
static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
{
        struct page *kpte_page;
        int level, err = 0;
        pte_t *kpte;

        BUG_ON(PageHighMem(page));

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return -EINVAL;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));

        /*
         * Better fail early if someone sets the kernel text to NX.
         * Does not cover __inittext
         */
        BUG_ON(address >= (unsigned long)&_text &&
               address < (unsigned long)&_etext &&
               (pgprot_val(prot) & _PAGE_NX));

        if (level == 4) {
                set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
        } else {
                err = split_large_page(kpte, address);
                if (!err)
                        goto repeat;
        }
        return err;
}

/**
 * change_page_attr_addr - Change page table attributes in the linear mapping
 * @address: Virtual address in the linear mapping.
 * @numpages: Number of pages to change
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0, kernel_map = 0, i;

#ifdef CONFIG_X86_64
        if (address >= __START_KERNEL_map &&
            address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }
#endif

        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                        err = __change_page_attr(address, pfn_to_page(pfn), prot);
                        if (err)
                                break;
                }
#ifdef CONFIG_X86_64
                /*
                 * Handle kernel mapping too which aliases part of
                 * lowmem:
                 */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;

                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
                }
#endif
        }

        return err;
}

/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes in the
 * kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * For MMIO areas without mem_map use change_page_attr_addr() instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);

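/*
 * Typical usage (a minimal sketch; PAGE_KERNEL_NOCACHE is just one example
 * of a target protection):
 *
 *      change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *      global_flush_tlb();
 *
 * The attribute change only becomes active after the TLB flush.
 */

/*
 * IPI callback used by global_flush_tlb(): flushes the whole TLB on the
 * local CPU and, on newer CPU models, also writes back and invalidates the
 * caches via wbinvd.
 */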
static void flush_kernel_map(void *arg)
{
        /*
         * Flush everything to work around an erratum in early Athlons
         * regarding large page flushing.
         */
        __flush_tlb_all();

        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
}

void global_flush_tlb(void)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);

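/*
 * With CONFIG_DEBUG_PAGEALLOC, free pages are unmapped from the kernel
 * linear mapping (and mapped back on allocation) so that stray accesses to
 * freed memory fault immediately.
 */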
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If the page allocator is not up yet then do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored - the calls cannot fail because
         * large pages are disabled at boot time.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

        /*
         * We should perform an IPI and flush all TLBs,
         * but that can deadlock, so flush only the current CPU.
         */
        __flush_tlb_all();
}
#endif