commit 64b3d0e812
Currently, we never set _PAGE_COHERENT in the PTEs; we just OR it in, in the hash code, based on some CPU feature bit. We also manipulate _PAGE_NO_CACHE and _PAGE_GUARDED by hand in all sorts of places.

This changes the logic so that instead, the PTE now contains _PAGE_COHERENT for all normal RAM pages that have I = 0, on platforms that need it. The hash code clears it if the feature bit is not set.

It also adds some clean accessors to set up various valid combinations of access flags, and changes various bits of code to use them instead. This should help ensure that the PTE actually contains the bit combinations that we really want.

I also removed _PAGE_GUARDED from _PAGE_BASE on 44x and instead set it explicitly from the TLB miss handler. I will ultimately remove it completely, as it appears that it might not be needed after all, but in the meantime, having it in the TLB miss makes things a lot easier.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
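As a rough illustration of what the new accessors buy us (hypothetical driver code, not part of this patch): where a driver previously assembled the cache-control bits by hand,

	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) |
				     _PAGE_NO_CACHE | _PAGE_GUARDED);

it can now write

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

which also clears any conflicting cache-control bits (see _PAGE_CACHE_CTL below), so the PTE ends up with exactly the combination we want.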
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
struct mm_struct;
#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

#ifndef __ASSEMBLY__

/*
 * Macros to mark a page protection value as "uncacheable",
 * write-combining, cached, or cached write-through.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				   _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				   _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				   _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				   _PAGE_COHERENT | _PAGE_WRITETHRU))
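
/*
 * Editorial note (not in the original source): each accessor above first
 * masks out all of _PAGE_CACHE_CTL before OR-ing in its own bits, so the
 * result always carries exactly one valid WIMG combination; for example,
 * pgprot_cached() yields M (coherent) set with I, W and G all clear.
 */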

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)
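/*
 * Illustrative use (assumed driver code, not part of this header): an
 * mmap() handler for device registers would typically combine the
 * accessors above with io_remap_pfn_range(), roughly:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */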

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
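/*
 * Illustrative call site (generic mm code, not defined here): the fault
 * path invokes this hook right after installing the new PTE, roughly:
 *
 *	set_pte_at(mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, entry);
 */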

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */