MIPS: init: Drop boot_mem_map
boot_mem_map was introduced very early and cannot handle memory maps
with nid. Nowadays, memblock can exactly replace boot_mem_map. Detect
pfn info and set up resources with memblock maps.

Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
[paul.burton@mips.com: Fix size calculation in check_kernel_sections_mem]
Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
Cc: yasha.che3@gmail.com
Cc: aurelien@aurel32.net
Cc: sfr@canb.auug.org.au
Cc: fancer.lancer@gmail.com
Cc: matt.redfearn@mips.com
Cc: chenhc@lemote.com
This commit is contained in:
parent a5718fe8f7
commit a94e4f24ec
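With boot_mem_map gone, platform code is expected to feed its memory layout straight into memblock (add_memory_region() remains only as a thin historical wrapper). Below is a minimal sketch of what a platform memory-setup hook might look like after this change; the function name, addresses, and sizes are illustrative assumptions, not part of this patch.

/* Hypothetical example only: my_plat_mem_setup(), MY_RAM_BASE/SIZE and
 * MY_FW_RSVD_BASE/SIZE are made-up names used for illustration. */
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#define MY_RAM_BASE      0x00000000UL   /* start of DRAM */
#define MY_RAM_SIZE      SZ_256M        /* total DRAM */
#define MY_FW_RSVD_BASE  0x0ff00000UL   /* firmware-owned region */
#define MY_FW_RSVD_SIZE  SZ_1M

void __init my_plat_mem_setup(void)
{
        /* Usable RAM goes straight to memblock; this replaces
         * add_memory_region(..., BOOT_MEM_RAM). */
        memblock_add(MY_RAM_BASE, MY_RAM_SIZE);

        /* Keep firmware memory away from the allocator; this replaces
         * add_memory_region(..., BOOT_MEM_RESERVED). */
        memblock_reserve(MY_FW_RSVD_BASE, MY_FW_RSVD_SIZE);
}

Node-aware platforms would use memblock_add_node() instead, as the note this patch adds to add_memory_region() suggests. The full diff follows.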
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -88,28 +88,12 @@ const char *get_system_type(void);
 
 extern unsigned long mips_machtype;
 
-#define BOOT_MEM_MAP_MAX	32
 #define BOOT_MEM_RAM		1
 #define BOOT_MEM_ROM_DATA	2
 #define BOOT_MEM_RESERVED	3
 #define BOOT_MEM_INIT_RAM	4
 #define BOOT_MEM_NOMAP		5
 
-/*
- * A memory map that's built upon what was determined
- * or specified on the command line.
- */
-struct boot_mem_map {
-        int nr_map;
-        struct boot_mem_map_entry {
-                phys_addr_t addr;	/* start of memory segment */
-                phys_addr_t size;	/* size of memory segment */
-                long type;		/* type of memory segment */
-        } map[BOOT_MEM_MAP_MAX];
-};
-
-extern struct boot_mem_map boot_mem_map;
-
 extern void add_memory_region(phys_addr_t start, phys_addr_t size, long type);
 extern void detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max);
 
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -63,8 +63,6 @@ unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
 
 EXPORT_SYMBOL(mips_machtype);
 
-struct boot_mem_map boot_mem_map;
-
 static char __initdata command_line[COMMAND_LINE_SIZE];
 char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
 
@@ -92,8 +90,10 @@ EXPORT_SYMBOL(ARCH_PFN_OFFSET);
 
 void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
 {
-        int x = boot_mem_map.nr_map;
-        int i;
+        /*
+         * Note: This function only exists for historical reason,
+         * new code should use memblock_add or memblock_add_node instead.
+         */
 
         /*
          * If the region reaches the top of the physical address space, adjust
@@ -108,38 +108,20 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
                 return;
         }
 
-        /*
-         * Try to merge with existing entry, if any.
-         */
-        for (i = 0; i < boot_mem_map.nr_map; i++) {
-                struct boot_mem_map_entry *entry = boot_mem_map.map + i;
-                unsigned long top;
-
-                if (entry->type != type)
-                        continue;
-
-                if (start + size < entry->addr)
-                        continue;			/* no overlap */
-
-                if (entry->addr + entry->size < start)
-                        continue;			/* no overlap */
-
-                top = max(entry->addr + entry->size, start + size);
-                entry->addr = min(entry->addr, start);
-                entry->size = top - entry->addr;
-
-                return;
-        }
-
-        if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
-                pr_err("Ooops! Too many entries in the memory map!\n");
-                return;
-        }
-
-        boot_mem_map.map[x].addr = start;
-        boot_mem_map.map[x].size = size;
-        boot_mem_map.map[x].type = type;
-        boot_mem_map.nr_map++;
+        memblock_add(start, size);
+        /* Reserve any memory except the ordinary RAM ranges. */
+        switch (type) {
+        case BOOT_MEM_RAM:
+                break;
+
+        case BOOT_MEM_NOMAP: /* Discard the range from the system. */
+                memblock_remove(start, size);
+                break;
+
+        default: /* Reserve the rest of the memory types at boot time */
+                memblock_reserve(start, size);
+                break;
+        }
 }
 
 void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
@@ -161,70 +143,6 @@ void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
         add_memory_region(start, size, BOOT_MEM_RAM);
 }
 
-static bool __init __maybe_unused memory_region_available(phys_addr_t start,
-                                                          phys_addr_t size)
-{
-        int i;
-        bool in_ram = false, free = true;
-
-        for (i = 0; i < boot_mem_map.nr_map; i++) {
-                phys_addr_t start_, end_;
-
-                start_ = boot_mem_map.map[i].addr;
-                end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;
-
-                switch (boot_mem_map.map[i].type) {
-                case BOOT_MEM_RAM:
-                        if (start >= start_ && start + size <= end_)
-                                in_ram = true;
-                        break;
-                case BOOT_MEM_RESERVED:
-                case BOOT_MEM_NOMAP:
-                        if ((start >= start_ && start < end_) ||
-                            (start < start_ && start + size >= start_))
-                                free = false;
-                        break;
-                default:
-                        continue;
-                }
-        }
-
-        return in_ram && free;
-}
-
-static void __init print_memory_map(void)
-{
-        int i;
-        const int field = 2 * sizeof(unsigned long);
-
-        for (i = 0; i < boot_mem_map.nr_map; i++) {
-                printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
-                       field, (unsigned long long) boot_mem_map.map[i].size,
-                       field, (unsigned long long) boot_mem_map.map[i].addr);
-
-                switch (boot_mem_map.map[i].type) {
-                case BOOT_MEM_RAM:
-                        printk(KERN_CONT "(usable)\n");
-                        break;
-                case BOOT_MEM_INIT_RAM:
-                        printk(KERN_CONT "(usable after init)\n");
-                        break;
-                case BOOT_MEM_ROM_DATA:
-                        printk(KERN_CONT "(ROM data)\n");
-                        break;
-                case BOOT_MEM_RESERVED:
-                        printk(KERN_CONT "(reserved)\n");
-                        break;
-                case BOOT_MEM_NOMAP:
-                        printk(KERN_CONT "(nomap)\n");
-                        break;
-                default:
-                        printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
-                        break;
-                }
-        }
-}
-
 /*
  * Manage initrd
  */
@@ -376,8 +294,11 @@ static void __init bootmem_init(void)
 
 static void __init bootmem_init(void)
 {
-        phys_addr_t ramstart = PHYS_ADDR_MAX;
-        int i;
+        struct memblock_region *mem;
+        phys_addr_t ramstart, ramend;
+
+        ramstart = memblock_start_of_DRAM();
+        ramend = memblock_end_of_DRAM();
 
         /*
          * Sanity check any INITRD first. We don't take it into account
@@ -391,47 +312,7 @@ static void __init bootmem_init(void)
         memblock_reserve(__pa_symbol(&_text),
                          __pa_symbol(&_end) - __pa_symbol(&_text));
 
-        /*
-         * max_low_pfn is not a number of pages. The number of pages
-         * of the system is given by 'max_low_pfn - min_low_pfn'.
-         */
-        min_low_pfn = ~0UL;
-        max_low_pfn = 0;
-
-        /* Find the highest and lowest page frame numbers we have available. */
-        for (i = 0; i < boot_mem_map.nr_map; i++) {
-                unsigned long start, end;
-
-                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
-                        continue;
-
-                start = PFN_UP(boot_mem_map.map[i].addr);
-                end = PFN_DOWN(boot_mem_map.map[i].addr
-                                + boot_mem_map.map[i].size);
-
-                ramstart = min(ramstart, boot_mem_map.map[i].addr);
-
-#ifndef CONFIG_HIGHMEM
-                /*
-                 * Skip highmem here so we get an accurate max_low_pfn if low
-                 * memory stops short of high memory.
-                 * If the region overlaps HIGHMEM_START, end is clipped so
-                 * max_pfn excludes the highmem portion.
-                 */
-                if (start >= PFN_DOWN(HIGHMEM_START))
-                        continue;
-                if (end > PFN_DOWN(HIGHMEM_START))
-                        end = PFN_DOWN(HIGHMEM_START);
-#endif
-
-                if (end > max_low_pfn)
-                        max_low_pfn = end;
-                if (start < min_low_pfn)
-                        min_low_pfn = start;
-        }
-
-        if (min_low_pfn >= max_low_pfn)
-                panic("Incorrect memory mapping !!!");
+        /* max_low_pfn is not a number of pages but the end pfn of low mem */
 
 #ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
         ARCH_PFN_OFFSET = PFN_UP(ramstart);
@@ -439,74 +320,58 @@ static void __init bootmem_init(void)
         /*
          * Reserve any memory between the start of RAM and PHYS_OFFSET
          */
-        if (ramstart > PHYS_OFFSET) {
-                add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
-                                  BOOT_MEM_RESERVED);
-                memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
-        }
+        if (ramstart > PHYS_OFFSET)
+                memblock_reserve(PHYS_OFFSET, PFN_UP(ramstart) - PHYS_OFFSET);
 
-        if (min_low_pfn > ARCH_PFN_OFFSET) {
+        if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
                 pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
-                        (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
-                        min_low_pfn - ARCH_PFN_OFFSET);
-        } else if (ARCH_PFN_OFFSET - min_low_pfn > 0UL) {
-                pr_info("%lu free pages won't be used\n",
-                        ARCH_PFN_OFFSET - min_low_pfn);
+                        (unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
+                        (unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
         }
-        min_low_pfn = ARCH_PFN_OFFSET;
 #endif
 
-        /*
-         * Determine low and high memory ranges
-         */
-        max_pfn = max_low_pfn;
-        if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
-#ifdef CONFIG_HIGHMEM
-                highstart_pfn = PFN_DOWN(HIGHMEM_START);
-                highend_pfn = max_low_pfn;
-#endif
-                max_low_pfn = PFN_DOWN(HIGHMEM_START);
-        }
-
-        /* Install all valid RAM ranges to the memblock memory region */
-        for (i = 0; i < boot_mem_map.nr_map; i++) {
-                unsigned long start, end;
-
-                start = PFN_UP(boot_mem_map.map[i].addr);
-                end = PFN_DOWN(boot_mem_map.map[i].addr
-                                + boot_mem_map.map[i].size);
-
-                if (start < min_low_pfn)
-                        start = min_low_pfn;
-#ifndef CONFIG_HIGHMEM
-                /* Ignore highmem regions if highmem is unsupported */
-                if (end > max_low_pfn)
-                        end = max_low_pfn;
-#endif
-                if (end <= start)
-                        continue;
-
-                memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
-
-                /* Reserve any memory except the ordinary RAM ranges. */
-                switch (boot_mem_map.map[i].type) {
-                case BOOT_MEM_RAM:
-                        break;
-                case BOOT_MEM_NOMAP: /* Discard the range from the system. */
-                        memblock_remove(PFN_PHYS(start), PFN_PHYS(end - start));
-                        continue;
-                default: /* Reserve the rest of the memory types at boot time */
-                        memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start));
-                        break;
-                }
+        min_low_pfn = ARCH_PFN_OFFSET;
+        max_pfn = PFN_DOWN(ramend);
+        for_each_memblock(memory, mem) {
+                unsigned long start = memblock_region_memory_base_pfn(mem);
+                unsigned long end = memblock_region_memory_end_pfn(mem);
 
+                /*
+                 * Skip highmem here so we get an accurate max_low_pfn if low
+                 * memory stops short of high memory.
+                 * If the region overlaps HIGHMEM_START, end is clipped so
+                 * max_pfn excludes the highmem portion.
+                 */
+                if (memblock_is_nomap(mem))
+                        continue;
+                if (start >= PFN_DOWN(HIGHMEM_START))
+                        continue;
+                if (end > PFN_DOWN(HIGHMEM_START))
+                        end = PFN_DOWN(HIGHMEM_START);
+                if (end > max_low_pfn)
+                        max_low_pfn = end;
+        }
+
+        if (min_low_pfn >= max_low_pfn)
+                panic("Incorrect memory mapping !!!");
+
+        if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
+#ifdef CONFIG_HIGHMEM
+                highstart_pfn = PFN_DOWN(HIGHMEM_START);
+                highend_pfn = max_pfn;
+#else
+                max_low_pfn = PFN_DOWN(HIGHMEM_START);
+                max_pfn = max_low_pfn;
+#endif
+        }
 
         /*
          * In any case the added to the memblock memory regions
          * (highmem/lowmem, available/reserved, etc) are considered
          * as present, so inform sparsemem about them.
          */
-                memory_present(0, start, end);
-        }
+        memblocks_present();
 
         /*
          * Reserve initrd memory if needed.
@@ -528,8 +393,9 @@ static int __init early_parse_mem(char *p)
          * size.
          */
         if (usermem == 0) {
-                boot_mem_map.nr_map = 0;
                 usermem = 1;
+                memblock_remove(memblock_start_of_DRAM(),
+                        memblock_end_of_DRAM() - memblock_start_of_DRAM());
         }
         start = 0;
         size = memparse(p, &p);
@@ -586,14 +452,13 @@ early_param("memmap", early_parse_memmap);
 unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
 static int __init early_parse_elfcorehdr(char *p)
 {
-        int i;
+        struct memblock_region *mem;
 
         setup_elfcorehdr = memparse(p, &p);
 
-        for (i = 0; i < boot_mem_map.nr_map; i++) {
-                unsigned long start = boot_mem_map.map[i].addr;
-                unsigned long end = (boot_mem_map.map[i].addr +
-                                     boot_mem_map.map[i].size);
+        for_each_memblock(memory, mem) {
+                unsigned long start = mem->base;
+                unsigned long end = mem->end;
                 if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
                         /*
                          * Reserve from the elf core header to the end of
@@ -613,47 +478,20 @@ static int __init early_parse_elfcorehdr(char *p)
 early_param("elfcorehdr", early_parse_elfcorehdr);
 #endif
 
-static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
-{
-        phys_addr_t size;
-        int i;
-
-        size = end - mem;
-        if (!size)
-                return;
-
-        /* Make sure it is in the boot_mem_map */
-        for (i = 0; i < boot_mem_map.nr_map; i++) {
-                if (mem >= boot_mem_map.map[i].addr &&
-                    mem < (boot_mem_map.map[i].addr +
-                           boot_mem_map.map[i].size))
-                        return;
-        }
-        add_memory_region(mem, size, type);
-}
-
 #ifdef CONFIG_KEXEC
-static inline unsigned long long get_total_mem(void)
-{
-        unsigned long long total;
-
-        total = max_pfn - min_low_pfn;
-        return total << PAGE_SHIFT;
-}
-
 static void __init mips_parse_crashkernel(void)
 {
         unsigned long long total_mem;
         unsigned long long crash_size, crash_base;
         int ret;
 
-        total_mem = get_total_mem();
+        total_mem = memblock_phys_mem_size();
         ret = parse_crashkernel(boot_command_line, total_mem,
                                 &crash_size, &crash_base);
         if (ret != 0 || crash_size <= 0)
                 return;
 
-        if (!memory_region_available(crash_base, crash_size)) {
+        if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) {
                 pr_warn("Invalid memory region reserved for crash kernel\n");
                 return;
         }
@@ -686,6 +524,17 @@ static void __init request_crashkernel(struct resource *res)
 }
 #endif /* !defined(CONFIG_KEXEC) */
 
+static void __init check_kernel_sections_mem(void)
+{
+        phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text)));
+        phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start;
+
+        if (!memblock_is_region_memory(start, size)) {
+                pr_info("Kernel sections are not in the memory maps\n");
+                memblock_add(start, size);
+        }
+}
+
 #define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
 #define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
 #define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
@@ -731,25 +580,6 @@ static void __init arch_mem_init(char **cmdline_p)
         plat_mem_setup();
         memblock_set_bottom_up(true);
 
-        /*
-         * Make sure all kernel memory is in the maps. The "UP" and
-         * "DOWN" are opposite for initdata since if it crosses over
-         * into another memory section you don't want that to be
-         * freed when the initdata is freed.
-         */
-        arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
-                         PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
-                         BOOT_MEM_RAM);
-        arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
-                         PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
-                         BOOT_MEM_INIT_RAM);
-        arch_mem_addpart(PFN_DOWN(__pa_symbol(&__bss_start)) << PAGE_SHIFT,
-                         PFN_UP(__pa_symbol(&__bss_stop)) << PAGE_SHIFT,
-                         BOOT_MEM_RAM);
-
-        pr_info("Determined physical RAM map:\n");
-        print_memory_map();
-
 #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
         strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 #else
@@ -783,14 +613,17 @@ static void __init arch_mem_init(char **cmdline_p)
 
         parse_early_param();
 
-        if (usermem) {
-                pr_info("User-defined physical RAM map:\n");
-                print_memory_map();
-        }
+        if (usermem)
+                pr_info("User-defined physical RAM map overwrite\n");
+
+        check_kernel_sections_mem();
 
         early_init_fdt_reserve_self();
         early_init_fdt_scan_reserved_mem();
 
+#ifndef CONFIG_NUMA
+        memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+#endif
         bootmem_init();
 
         /*
@@ -830,12 +663,12 @@ static void __init arch_mem_init(char **cmdline_p)
 
         memblock_dump_all();
 
-        early_memtest(PFN_PHYS(min_low_pfn), PFN_PHYS(max_low_pfn));
+        early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
 }
 
 static void __init resource_init(void)
 {
-        int i;
+        struct memblock_region *region;
 
         if (UNCAC_BASE != IO_BASE)
                 return;
@@ -847,16 +680,10 @@ static void __init resource_init(void)
         bss_resource.start = __pa_symbol(&__bss_start);
         bss_resource.end = __pa_symbol(&__bss_stop) - 1;
 
-        for (i = 0; i < boot_mem_map.nr_map; i++) {
+        for_each_memblock(memory, region) {
+                phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region));
+                phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;
                 struct resource *res;
-                unsigned long start, end;
-
-                start = boot_mem_map.map[i].addr;
-                end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
-                if (start >= HIGHMEM_START)
-                        continue;
-                if (end >= HIGHMEM_START)
-                        end = HIGHMEM_START - 1;
 
                 res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
                 if (!res)
@@ -865,20 +692,8 @@ static void __init resource_init(void)
 
                 res->start = start;
                 res->end = end;
-                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-
-                switch (boot_mem_map.map[i].type) {
-                case BOOT_MEM_RAM:
-                case BOOT_MEM_INIT_RAM:
-                case BOOT_MEM_ROM_DATA:
-                        res->name = "System RAM";
-                        res->flags |= IORESOURCE_SYSRAM;
-                        break;
-                case BOOT_MEM_RESERVED:
-                case BOOT_MEM_NOMAP:
-                default:
-                        res->name = "reserved";
-                }
+                res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+                res->name = "System RAM";
 
                 request_resource(&iomem_resource, res);
 