commit 37dd2badcf

Added support to allow an 85xx kernel to be run from a non-zero physical address (useful for cooperative asymmetric multiprocessing situations and kdump). The support can be configured at compile time by setting CONFIG_PAGE_OFFSET, CONFIG_KERNEL_START, and CONFIG_PHYSICAL_START as desired.

Alternatively, the kernel build can set CONFIG_RELOCATABLE. Setting this config option causes the kernel to determine at runtime the physical addresses of CONFIG_PAGE_OFFSET and CONFIG_KERNEL_START. If CONFIG_RELOCATABLE is set, then CONFIG_PHYSICAL_START has no meaning; however, CONFIG_PHYSICAL_START will always be used to set the LOAD program header physical address field in the resulting ELF image.

Currently we are limited to running at a physical address that is a multiple of 256M. This is due to how we map TLBs to cover lowmem, and should be fixed to allow 64M or maybe even 16M alignment in the future. It is considered an error to try to run a kernel at a non-aligned physical address.

All the magic for this support is accomplished by proper initialization of the kernel memory subsystem and use of ARCH_PFN_OFFSET. The use of ARCH_PFN_OFFSET only affects normal memory, not IO mappings: ioremap uses map_page and isn't affected by ARCH_PFN_OFFSET. /dev/mem continues to allow access to any physical address in the system regardless of how CONFIG_PHYSICAL_START is set.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
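To make the pfn bookkeeping concrete, here is a minimal standalone sketch (not kernel code) of how a non-zero load address can feed ARCH_PFN_OFFSET and the lowmem virtual-to-physical translation. The PAGE_OFFSET, PHYSICAL_START, and 256M alignment values below are illustrative assumptions, not the kernel's actual configuration:

/*
 * Illustrative-only sketch: models how a non-zero physical load address
 * feeds pfn accounting via ARCH_PFN_OFFSET. Example values, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12UL			/* 4 KiB pages */
#define PAGE_OFFSET	0xc0000000UL		/* typical 32-bit kernel virtual base */
#define PHYSICAL_START	0x10000000UL		/* example: kernel loaded at 256M */
#define PHYS_ALIGN	0x10000000UL		/* current 256M alignment limit */

/* pfn accounting for "normal" memory starts at the physical load address. */
#define ARCH_PFN_OFFSET	(PHYSICAL_START >> PAGE_SHIFT)

int main(void)
{
	/* The load address must sit on the alignment boundary. */
	if (PHYSICAL_START % PHYS_ALIGN) {
		fprintf(stderr, "non-aligned physical start\n");
		return 1;
	}

	uintptr_t virt = PAGE_OFFSET + 0x00123000UL;	/* some lowmem virtual address */
	uintptr_t phys = virt - PAGE_OFFSET + PHYSICAL_START;
	unsigned long pfn = phys >> PAGE_SHIFT;

	/* mem_map[] indexing subtracts ARCH_PFN_OFFSET, so index 0 corresponds
	 * to the page at PHYSICAL_START rather than physical address zero. */
	printf("virt 0x%lx -> phys 0x%lx, pfn 0x%lx, mem_map index %lu\n",
	       (unsigned long)virt, (unsigned long)phys, pfn, pfn - ARCH_PFN_OFFSET);
	return 0;
}

With PHYSICAL_START at 0x10000000 in this sketch, pfn accounting starts at ARCH_PFN_OFFSET = 0x10000, so the first kernel page gets mem_map index 0 even though the kernel does not sit at physical address zero.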
39 lines
1.0 KiB
C
#ifndef _ASM_POWERPC_PAGE_32_H
#define _ASM_POWERPC_PAGE_32_H

#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
#endif
#endif

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32

#ifdef CONFIG_NOT_COHERENT_CACHE
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
#endif

#ifndef __ASSEMBLY__
/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing. For now this is just the IBM PPC440.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
#endif

struct page;
extern void clear_pages(void *page, int order);
static inline void clear_page(void *page) { clear_pages(page, 0); }
extern void copy_page(void *to, void *from);

#include <asm-generic/page.h>

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_32_H */
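As a quick check of the PTE_SHIFT comments above ("512 ptes per page" vs. "1024 ptes per page"), the following standalone snippet reproduces the arithmetic. It is illustrative only and assumes the usual 4 KiB pages (PAGE_SHIFT == 12); it is not part of the kernel tree:

/* Illustrative only: reproduces the PTE_SHIFT arithmetic from page_32.h,
 * assuming 4 KiB pages (PAGE_SHIFT == 12). */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long page_size  = 1UL << PAGE_SHIFT;		/* 4096 bytes */
	unsigned long ptes_64bit = 1UL << (PAGE_SHIFT - 3);	/* 8-byte PTEs: 512 per page */
	unsigned long ptes_32bit = 1UL << (PAGE_SHIFT - 2);	/* 4-byte PTEs: 1024 per page */

	printf("a %lu-byte page holds %lu 8-byte PTEs or %lu 4-byte PTEs\n",
	       page_size, ptes_64bit, ptes_32bit);
	return 0;
}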