mirror of
https://github.com/torvalds/linux.git
synced 2024-11-16 17:12:06 +00:00
72a7fe3967
This patchset adds a flags variable to reserve_bootmem() and uses the BOOTMEM_EXCLUSIVE flag in crashkernel reservation code to detect collisions between crashkernel area and already used memory. This patch: Change the reserve_bootmem() function to accept a new flag BOOTMEM_EXCLUSIVE. If that flag is set, the function returns with -EBUSY if the memory already has been reserved in the past. This is to avoid conflicts. Because that code runs before SMP initialisation, there's no race condition inside reserve_bootmem_core(). [akpm@linux-foundation.org: coding-style fixes] [akpm@linux-foundation.org: fix powerpc build] Signed-off-by: Bernhard Walle <bwalle@suse.de> Cc: <linux-arch@vger.kernel.org> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: Vivek Goyal <vgoyal@in.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
167 lines
3.9 KiB
C
167 lines
3.9 KiB
C
/*
|
|
* linux/arch/m32r/mm/discontig.c
|
|
*
|
|
* Discontig memory support
|
|
*
|
|
* Copyright (c) 2003 Hitoshi Yamamoto
|
|
*/
|
|
|
|
#include <linux/mm.h>
|
|
#include <linux/bootmem.h>
|
|
#include <linux/mmzone.h>
|
|
#include <linux/initrd.h>
|
|
#include <linux/nodemask.h>
|
|
#include <linux/module.h>
|
|
#include <linux/pfn.h>
|
|
|
|
#include <asm/setup.h>
|
|
|
|
/* End of the kernel image; provided by the linker script. */
extern char _end[];

/* Per-node pg_data_t pointers, indexed by node id. */
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);
/* Boot-time allocator bookkeeping, one per node; discarded after init. */
static bootmem_data_t node_bdata[MAX_NUMNODES] __initdata;

/* Backing storage that NODE_DATA(nid) is pointed at in setup_memory(). */
pg_data_t m32r_node_data[MAX_NUMNODES];

/* Memory profile */
typedef struct {
	unsigned long start_pfn;	/* first pfn of the node */
	unsigned long pages;		/* node size in pages, holes included */
	unsigned long holes;		/* pfns counted in .pages with no RAM behind them */
	unsigned long free_pfn;		/* first pfn not already in use at boot */
} mem_prof_t;
static mem_prof_t mem_prof[MAX_NUMNODES];
|
|
|
|
/*
 * mem_prof_init() - build the per-node memory profiles.
 *
 * Node #0 describes the external SDRAM: everything below the kernel
 * image end (_end) is already in use.  Node #1 describes the on-chip
 * SRAM; if its start pfn is not aligned to a zone boundary the start
 * is rounded down and the gap is accounted as holes.
 */
static void __init mem_prof_init(void)
{
	const unsigned long align = 1UL << (MAX_ORDER - 1);
	unsigned long sram_start, sram_free, sram_holes;
	mem_prof_t *prof;

	/* Node#0 SDRAM */
	prof = &mem_prof[0];
	prof->start_pfn = PFN_UP(CONFIG_MEMORY_START);
	prof->pages = PFN_DOWN(CONFIG_MEMORY_SIZE);
	prof->holes = 0;
	prof->free_pfn = PFN_UP(__pa(_end));

	/* Node#1 internal SRAM */
	prof = &mem_prof[1];
	sram_free = PFN_UP(CONFIG_IRAM_START);
	sram_start = sram_free;
	sram_holes = 0;
	if (sram_start & (align - 1)) {
		/*
		 * Round the start pfn down to a zone-aligned boundary
		 * (align is a power of two, so masking is exact); the
		 * pfns between the aligned start and the real SRAM
		 * start become holes.
		 */
		sram_start &= ~(align - 1);
		sram_holes = sram_free - sram_start;
	}

	prof->start_pfn = sram_start;
	prof->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + sram_holes;
	prof->holes = sram_holes;
	prof->free_pfn = PFN_UP(CONFIG_IRAM_START);
}
|
|
|
|
/*
 * setup_memory() - initialise the bootmem allocator for every node.
 *
 * Builds the memory profiles, then for each online node registers its
 * pfn range with bootmem, frees the whole range, and re-reserves the
 * part already in use (everything below free_pfn plus the bootmem
 * bitmap itself).  Optionally reserves the initrd on node 0.
 *
 * Returns the highest low pfn seen across all nodes (also stored in
 * max_low_pfn).
 */
unsigned long __init setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long min_pfn;
	int nid;
	mem_prof_t *mp;

	/* Start min_low_pfn at ULONG_MAX (unsigned wrap of -1) so any
	 * node's start pfn lowers it. */
	max_low_pfn = 0;
	min_low_pfn = -1;

	mem_prof_init();

	for_each_online_node(nid) {
		mp = &mem_prof[nid];
		NODE_DATA(nid)=(pg_data_t *)&m32r_node_data[nid];
		NODE_DATA(nid)->bdata = &node_bdata[nid];
		min_pfn = mp->start_pfn;
		max_pfn = mp->start_pfn + mp->pages;
		/* Place the bootmem bitmap at the node's first free pfn. */
		bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
			mp->start_pfn, max_pfn);

		/* Make the whole node range available ... */
		free_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
			PFN_PHYS(mp->pages));

		/* ... then reserve what is already in use: the pfns below
		 * free_pfn plus the bitmap just allocated above. */
		reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
			PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size,
			BOOTMEM_DEFAULT);

		/* Track the global pfn bounds over all nodes. */
		if (max_low_pfn < max_pfn)
			max_low_pfn = max_pfn;

		if (min_low_pfn > min_pfn)
			min_low_pfn = min_pfn;
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve the initrd on node 0 if the bootloader passed one and
	 * it fits below the end of low memory; otherwise disable it. */
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
			reserve_bootmem_node(NODE_DATA(0), INITRD_START,
				INITRD_SIZE, BOOTMEM_DEFAULT);
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start + INITRD_SIZE;
			printk("initrd:start[%08lx],size[%08lx]\n",
				initrd_start, INITRD_SIZE);
		} else {
			printk("initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				INITRD_START + INITRD_SIZE,
				PFN_PHYS(max_low_pfn));

			initrd_start = 0;
		}
	}
#endif	/* CONFIG_BLK_DEV_INITRD */

	return max_low_pfn;
}
|
|
|
|
/* First pfn managed by node @nid's bootmem allocator. */
#define START_PFN(nid) \
	(NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT)
/* End-of-low-memory pfn of node @nid, as recorded by bootmem. */
#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)
|
|
|
|
/*
 * zone_sizes_init() - hand each node's zone layout to the core MM.
 *
 * Only ZONE_DMA is populated on this architecture; every other zone
 * entry stays zero.  After setting up the zones, the internal-SRAM
 * node (#1) has its watermarks cleared so the whole SRAM is usable.
 *
 * Returns the total number of hole pfns over all nodes.
 */
unsigned long __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
	unsigned long zholes_size[MAX_NR_ZONES] = { 0 };
	unsigned long total_holes = 0;
	int nid;

	for_each_online_node(nid) {
		mem_prof_t *prof = &mem_prof[nid];
		unsigned long first_pfn = START_PFN(nid);
		unsigned long end_pfn = MAX_LOW_PFN(nid);

		/* Only the ZONE_DMA slot is ever written, so the arrays
		 * need zeroing just once, at their declarations. */
		zones_size[ZONE_DMA] = end_pfn - first_pfn;
		zholes_size[ZONE_DMA] = prof->holes;
		total_holes += zholes_size[ZONE_DMA];

		free_area_init_node(nid, NODE_DATA(nid), zones_size,
			first_pfn, zholes_size);
	}

	/*
	 * For test
	 * Use all area of internal RAM.
	 * see __alloc_pages()
	 */
	NODE_DATA(1)->node_zones->pages_min = 0;
	NODE_DATA(1)->node_zones->pages_low = 0;
	NODE_DATA(1)->node_zones->pages_high = 0;

	return total_holes;
}
|
|
|