[POWERPC] DEBUG_PAGEALLOC for 32-bit

Here's an implementation of DEBUG_PAGEALLOC for ppc32. It disables BAT
mappings (and large-page TLB entries), since those cover RAM in large
blocks that cannot be unmapped one page at a time. It has only been
tested on hash-table based processors, though it shouldn't be too hard
to adapt to others.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/Kconfig.debug       |    9 ++++++
 arch/powerpc/mm/init_32.c        |    4 +++
 arch/powerpc/mm/pgtable_32.c     |   52 +++++++++++++++++++++++++++++++++++++++
 arch/powerpc/mm/ppc_mmu_32.c     |    4 ++-
 include/asm-powerpc/cacheflush.h |    6 ++++
 5 files changed, 74 insertions(+), 1 deletion(-)
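To illustrate what this catches (a sketch, not part of the patch;
alloc_pages()/__free_pages() are the generic page allocator entry
points):

	struct page *page = alloc_pages(GFP_KERNEL, 0);
	char *va = page_address(page);	/* lowmem linear-map address */

	__free_pages(page, 0);		/* DEBUG_PAGEALLOC unmaps va here */
	va[0] = 0;			/* use-after-free: faults at once
					   instead of silently corrupting
					   whatever reuses the page */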

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -18,6 +18,15 @@ config DEBUG_STACK_USAGE
 
 	  This option will slow down process creation somewhat.
 
+config DEBUG_PAGEALLOC
+	bool "Debug page memory allocations"
+	depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && PPC32
+	help
+	  Unmap pages from the kernel linear mapping after free_pages().
+	  This results in a large slowdown, but helps to find certain types
+	  of memory corruptions.
+
+
 config HCALL_STATS
 	bool "Hypervisor call instrumentation"
 	depends on PPC_PSERIES && DEBUG_FS
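Trying it out is then just a matter of flipping the new option,
assuming the dependencies above are met (DEBUG_KERNEL on,
SOFTWARE_SUSPEND off, PPC32 kernel):

	CONFIG_DEBUG_KERNEL=y
	CONFIG_DEBUG_PAGEALLOC=y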

diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -115,6 +115,10 @@ void MMU_setup(void)
 	if (strstr(cmd_line, "noltlbs")) {
 		__map_without_ltlbs = 1;
 	}
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	__map_without_bats = 1;
+	__map_without_ltlbs = 1;
+#endif
 }
 
 /*
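This forces the same state as always booting with the existing
switches (the "nobats" check sits just above the context shown):

	nobats noltlbs

so lowmem ends up covered by ordinary 4k PTEs, which the new
__change_page_attr() below can then flip one page at a time.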

diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -451,3 +451,55 @@ exit:
 	return ret;
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+
+static int __change_page_attr(struct page *page, pgprot_t prot)
+{
+	pte_t *kpte;
+	pmd_t *kpmd;
+	unsigned long address;
+
+	BUG_ON(PageHighMem(page));
+	address = (unsigned long)page_address(page);
+
+	if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address))
+		return 0;
+	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
+		return -EINVAL;
+	set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
+	wmb();
+	flush_HPTE(0, address, pmd_val(*kpmd));
+	pte_unmap(kpte);
+
+	return 0;
+}
+
+/*
+ * Change the page attributes of an page in the linear mapping.
+ *
+ * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY
+ */
+static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+{
+	int i, err = 0;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	for (i = 0; i < numpages; i++, page++) {
+		err = __change_page_attr(page, prot);
+		if (err)
+			break;
+	}
+	local_irq_restore(flags);
+	return err;
+}
+
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	if (PageHighMem(page))
+		return;
+
+	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
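The hook is driven by the generic page allocator; roughly, the call
sites in mm/page_alloc.c look like this (not part of this patch, shown
for context, exact placement may differ):

	kernel_map_pages(page, 1 << order, 0);	/* on free: clear the PTEs */

	kernel_map_pages(page, 1 << order, 1);	/* on alloc: restore PAGE_KERNEL */

Note that __change_page_attr() quietly returns 0 for addresses covered
by BATs or pinned TLB entries, which is why MMU_setup() above has to
force those mappings off for the option to actually do anything.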

diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -85,8 +85,10 @@ unsigned long __init mmu_mapin_ram(void)
 	unsigned long max_size = (256<<20);
 	unsigned long align;
 
-	if (__map_without_bats)
+	if (__map_without_bats) {
+		printk(KERN_DEBUG "RAM mapped without BATs\n");
 		return 0;
+	}
 
 	/* Set up BAT2 and if necessary BAT3 to cover RAM. */
 
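With the option enabled, a successful boot should log the new message
(it is KERN_DEBUG, so it may take a permissive loglevel or "debug" on
the command line to see it):

	RAM mapped without BATs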

diff --git a/include/asm-powerpc/cacheflush.h b/include/asm-powerpc/cacheflush.h
--- a/include/asm-powerpc/cacheflush.h
+++ b/include/asm-powerpc/cacheflush.h
@@ -64,6 +64,12 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
 
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/* internal debugging function */
+void kernel_map_pages(struct page *page, int numpages, int enable);
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_POWERPC_CACHEFLUSH_H */