x86: create a non-zero sized bm_pte only when needed
Impact: kernel image size reduction

Since in most configurations the pmd page needed here maps the same range of
virtual addresses that is also mapped by the page inserted earlier to cover
FIX_DBGP_BASE, that page (and its insertion into the page tables) can be
avoided altogether by detecting the condition at compile time.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
LKML-Reference: <49B91826.76E4.0078.0@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent 5c0e6f035d
commit 698609bdcd
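The patch's central idea is detecting at compile time whether a separate pte
page is needed at all. Below is a minimal user-space sketch of that trick; the
slot indices, PAGE_SHIFT/PMD_SHIFT values, and the reuse of the bm_pte name are
illustrative assumptions, not the kernel's actual fixmap layout:

/*
 * Sketch only: if two fixmap slots land in the same pmd-covered 2 MiB
 * region, the XOR of their virtual addresses shifted right by PMD_SHIFT
 * is zero, the array size evaluates to 0, and "if (sizeof(bm_pte))"
 * becomes a compile-time constant.  Zero-length arrays are a GCC
 * extension, which the kernel build already relies on.
 */
#include <stdio.h>

#define PMD_SHIFT       21                      /* one pmd covers 2 MiB */
#define PAGE_SHIFT      12
#define FIXADDR_TOP     (0UL - (1UL << PAGE_SHIFT))
#define fix_to_virt(x)  (FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))

enum { FIX_DBGP_BASE = 1, FIX_BTMAP_BEGIN = 4 };  /* hypothetical indices */

/* Size is zero when both slots share one pmd-mapped region. */
static char bm_pte[(fix_to_virt(FIX_DBGP_BASE) ^ fix_to_virt(FIX_BTMAP_BEGIN))
                   >> PMD_SHIFT ? 1 << PAGE_SHIFT : 0];

int main(void)
{
        if (sizeof(bm_pte))     /* constant-folded; dead branch is dropped */
                printf("separate pte page needed: %zu bytes\n",
                       sizeof(bm_pte));
        else
                printf("slots share one pmd: no extra pte page\n");
        return 0;
}

When the size expression folds to zero, the compiler discards both the array
and the code guarded by sizeof(bm_pte), which is how the patch shrinks the
image in the common configuration described in the commit message.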
@@ -487,7 +487,12 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
+#define __FIXADDR_TOP (-PAGE_SIZE)
+static pte_t bm_pte[(__fix_to_virt(FIX_DBGP_BASE)
+                     ^ __fix_to_virt(FIX_BTMAP_BEGIN)) >> PMD_SHIFT
+                    ? PAGE_SIZE / sizeof(pte_t) : 0] __page_aligned_bss;
+#undef __FIXADDR_TOP
+static __initdata pte_t *bm_ptep;
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
@@ -502,6 +507,8 @@ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 
 static inline pte_t * __init early_ioremap_pte(unsigned long addr)
 {
+	if (!sizeof(bm_pte))
+		return &bm_ptep[pte_index(addr)];
 	return &bm_pte[pte_index(addr)];
 }
 
@@ -519,8 +526,14 @@ void __init early_ioremap_init(void)
 		slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
 
 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-	memset(bm_pte, 0, sizeof(bm_pte));
-	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+	if (sizeof(bm_pte)) {
+		memset(bm_pte, 0, sizeof(bm_pte));
+		pmd_populate_kernel(&init_mm, pmd, bm_pte);
+	} else {
+		bm_ptep = pte_offset_kernel(pmd, 0);
+		if (early_ioremap_debug)
+			printk(KERN_INFO "bm_ptep=%p\n", bm_ptep);
+	}
 
 	/*
 	 * The boot-ioremap range spans multiple pmds, for which