From 8f6c99c11ae63ce887686f3e51c412cc4d8d8a7d Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Tue, 19 Apr 2005 13:29:17 -0700
Subject: [PATCH] [PATCH] freepgt: remove arch pgd_addr_end

ia64 and sparc64 hurriedly had to introduce their own variants of
pgd_addr_end, to leapfrog over the holes in their virtual address spaces
which the final clear_page_range suddenly presented when converted from
pgd_index to pgd_addr_end.  But now that free_pgtables respects the vma
list, those holes are never presented, and the arch variants can go.

Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-generic/pgtable.h |  8 +++-----
 include/asm-ia64/pgtable.h    | 26 --------------------------
 include/asm-sparc64/pgtable.h | 15 ---------------
 3 files changed, 3 insertions(+), 46 deletions(-)

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index a3b28710d56c..1f4ec7b70270 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -140,17 +140,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #endif
 
 /*
- * When walking page tables, get the address of the next boundary, or
- * the end address of the range if that comes earlier. Although end might
- * wrap to 0 only in clear_page_range, __boundary may wrap to 0 throughout.
+ * When walking page tables, get the address of the next boundary,
+ * or the end address of the range if that comes earlier. Although no
+ * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
  */
 
-#ifndef pgd_addr_end
 #define pgd_addr_end(addr, end) \
 ({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
	(__boundary - 1 < (end) - 1)? __boundary: (end); \
 })
-#endif
 
 #ifndef pud_addr_end
 #define pud_addr_end(addr, end) \
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index bbf6dd757003..fecfd0f68961 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -561,32 +561,6 @@ do { \
 #define __HAVE_ARCH_PGD_OFFSET_GATE
 #define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
 
-/*
- * Override for pgd_addr_end() to deal with the virtual address space holes
- * in each region. In regions 0..4 virtual address bits are used like this:
- * +--------+------+--------+-----+-----+--------+
- * | pgdhi3 | rsvd | pgdlow | pmd | pte | offset |
- * +--------+------+--------+-----+-----+--------+
- * 'pgdlow' overflows to pgdhi3 (a.k.a. region bits) leaving rsvd==0
- */
-#define IA64_PGD_OVERFLOW (PGDIR_SIZE << (PAGE_SHIFT-6))
-
-#define pgd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
-	if (REGION_NUMBER(__boundary) < 5 && \
-			__boundary & IA64_PGD_OVERFLOW) \
-		__boundary += (RGN_SIZE - 1) & ~(IA64_PGD_OVERFLOW - 1);\
-	(__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-
-#define pmd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
-	if (REGION_NUMBER(__boundary) < 5 && \
-			__boundary & IA64_PGD_OVERFLOW) \
-		__boundary += (RGN_SIZE - 1) & ~(IA64_PGD_OVERFLOW - 1);\
-	(__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-
 #include <asm-generic/pgtable-nopud.h>
 #include <asm-generic/pgtable.h>
 
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index ca04ac105b69..c93011574843 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -424,21 +424,6 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
 
-/* Override for {pgd,pmd}_addr_end() to deal with the virtual address
- * space hole. We simply sign extend bit 43.
- */
-#define pgd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
-	__boundary = ((long) (__boundary << 20)) >> 20; \
-	(__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-
-#define pmd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
-	__boundary = ((long) (__boundary << 20)) >> 20; \
-	(__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-
 #include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
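
For reference, here is a minimal stand-alone sketch (not part of the patch) of
the generic pgd_addr_end() clamp that this change leaves as the only variant.
The toy PGDIR_SHIFT value and the main() harness are illustrative assumptions,
not kernel code; they only show why the "(__boundary - 1 < (end) - 1)"
comparison picks the right bound even when the rounded-up boundary wraps to 0,
which is all the generic macro needs now that free_pgtables only walks
addresses covered by the vma list.

/* Sketch only: toy PGDIR_SHIFT, user-space harness, GCC statement exprs. */
#include <stdio.h>

#define PGDIR_SHIFT	8			/* toy value, not a real arch layout */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

#define pgd_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
	(__boundary - 1 < (end) - 1)? __boundary: (end); \
})

int main(void)
{
	/* Next PGDIR boundary comes before 'end': the walk stops at 0x200. */
	printf("%#lx\n", pgd_addr_end(0x130UL, 0x500UL));

	/* 'end' comes first: the walk is clamped to 0x180. */
	printf("%#lx\n", pgd_addr_end(0x130UL, 0x180UL));

	/*
	 * Top of the address space: __boundary rounds up and wraps to 0,
	 * but subtracting 1 from both sides keeps the comparison correct,
	 * so 'end' (here ~0UL) is returned rather than the wrapped 0.
	 */
	printf("%#lx\n", pgd_addr_end(PGDIR_MASK & -1UL, -1UL));
	return 0;
}

Built with gcc (statement expressions are a GNU extension, as in the kernel),
the three results are 0x200, 0x180 and ~0UL: no vma end wraps to 0, but the
rounded up __boundary may, exactly as the updated asm-generic comment says.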