From 952a31c9e6fa963eabf3692f31a769e59f4c8303 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Tue, 18 Sep 2018 14:51:50 +0200
Subject: [PATCH] asm-generic/tlb: Introduce CONFIG_HAVE_MMU_GATHER_NO_GATHER=y

Add the Kconfig option HAVE_MMU_GATHER_NO_GATHER to the generic
mmu_gather code. If the option is set the mmu_gather will not track
individual pages for delayed page free anymore. A platform that enables
the option needs to provide its own implementation of the
__tlb_remove_page_size() function to free pages.

No change in behavior intended.

Signed-off-by: Martin Schwidefsky
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: aneesh.kumar@linux.vnet.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: linux@armlinux.org.uk
Cc: npiggin@gmail.com
Link: http://lkml.kernel.org/r/20180918125151.31744-2-schwidefsky@de.ibm.com
Signed-off-by: Ingo Molnar
---
 arch/Kconfig              |   3 +
 include/asm-generic/tlb.h |   9 ++-
 mm/mmu_gather.c           | 127 +++++++++++++++++++++-----------------
 3 files changed, 80 insertions(+), 59 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 04b3e8b94cfe..a826843470ed 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -389,6 +389,9 @@ config HAVE_RCU_TABLE_NO_INVALIDATE
 config HAVE_MMU_GATHER_PAGE_SIZE
 	bool
 
+config HAVE_MMU_GATHER_NO_GATHER
+	bool
+
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
 	bool
 
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 81799e6a4304..af20aa8255cd 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -191,6 +191,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 #endif
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -215,6 +216,10 @@ struct mmu_gather_batch {
  */
 #define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
 
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+				   int page_size);
+#endif
+
 /*
  * struct mmu_gather is an opaque type used by the mm code for passing around
  * any data needed by arch specific code for tlb_remove_page.
@@ -261,6 +266,7 @@ struct mmu_gather {
 
 	unsigned int		batch_count;
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
@@ -268,6 +274,7 @@ struct mmu_gather {
 #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 	unsigned int page_size;
 #endif
+#endif
 };
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
@@ -276,8 +283,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 			 unsigned long start, unsigned long end, bool force);
 void tlb_flush_mmu_free(struct mmu_gather *tlb);
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
-				   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address,
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 2a5322d52b0a..ab220edcd7ef 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -13,6 +13,8 @@
 
 #ifdef HAVE_GENERIC_MMU_GATHER
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+
 static bool tlb_next_batch(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;
@@ -41,37 +43,10 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
 	return true;
 }
 
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-
-	/* Is it from 0 to ~0? */
-	tlb->fullmm     = !(start | (end+1));
-	tlb->need_flush_all = 0;
-	tlb->local.next = NULL;
-	tlb->local.nr   = 0;
-	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
-	tlb->active     = &tlb->local;
-	tlb->batch_count = 0;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb->batch = NULL;
-#endif
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
-	tlb->page_size = 0;
-#endif
-
-	__tlb_reset_range(tlb);
-}
-
-void tlb_flush_mmu_free(struct mmu_gather *tlb)
+static void tlb_batch_pages_flush(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb_table_flush(tlb);
-#endif
 	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
 		free_pages_and_swap_cache(batch->pages, batch->nr);
 		batch->nr = 0;
@@ -79,31 +54,10 @@ void tlb_flush_mmu_free(struct mmu_gather *tlb)
 	tlb->active = &tlb->local;
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	tlb_flush_mmu_tlbonly(tlb);
-	tlb_flush_mmu_free(tlb);
-}
-
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end, bool force)
+static void tlb_batch_list_free(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch, *next;
 
-	if (force) {
-		__tlb_reset_range(tlb);
-		__tlb_adjust_range(tlb, start, end - start);
-	}
-
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
 	for (batch = tlb->local.next; batch; batch = next) {
 		next = batch->next;
 		free_pages((unsigned long)batch, 0);
@@ -111,13 +65,6 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 	tlb->local.next = NULL;
 }
 
-/* __tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *	handling the additional races in SMP caused by other CPUs caching valid
- *	mappings in their TLBs. Returns the number of free page slots left.
- *	When out of page slots we must call tlb_flush_mmu().
- *returns true if the caller should flush.
- */
 bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
 {
 	struct mmu_gather_batch *batch;
@@ -144,6 +91,72 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 	return false;
 }
 
+#endif /* HAVE_MMU_GATHER_NO_GATHER */
+
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			unsigned long start, unsigned long end)
+{
+	tlb->mm = mm;
+
+	/* Is it from 0 to ~0? */
+	tlb->fullmm     = !(start | (end+1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb->need_flush_all = 0;
+	tlb->local.next = NULL;
+	tlb->local.nr   = 0;
+	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
+	tlb->active     = &tlb->local;
+	tlb->batch_count = 0;
+#endif
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	tlb->page_size = 0;
+#endif
+
+	__tlb_reset_range(tlb);
+}
+
+void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb_table_flush(tlb);
+#endif
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_pages_flush(tlb);
+#endif
+}
+
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
+/* tlb_finish_mmu
+ *	Called at the end of the shootdown operation to free up any resources
+ *	that were required.
+ */
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end, bool force)
+{
+	if (force) {
+		__tlb_reset_range(tlb);
+		__tlb_adjust_range(tlb, start, end - start);
+	}
+
+	tlb_flush_mmu(tlb);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_list_free(tlb);
+#endif
+}
+
 #endif /* HAVE_GENERIC_MMU_GATHER */
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
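
Note (commentary, not part of the patch above): with HAVE_MMU_GATHER_NO_GATHER selected, the generic page-gathering code in mm/mmu_gather.c and the batch fields in struct mmu_gather are compiled out, so the architecture has to supply __tlb_remove_page_size() itself. As a rough sketch only, such an arch-side implementation could free each page immediately instead of batching it; the use of free_page_and_swap_cache() and the assumption that the architecture has already flushed the TLB entry before the page is freed are illustrative choices here, not something spelled out by this patch.

/*
 * Hypothetical arch <asm/tlb.h> sketch, assuming the architecture selects
 * HAVE_MMU_GATHER_NO_GATHER and invalidates the TLB entry before the page
 * reaches this point, so the page can be released right away.
 */
#include <linux/swap.h>		/* free_page_and_swap_cache() */
#include <linux/mm_types.h>	/* struct page, struct mmu_gather */

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	/* No gather list: release the page (and its swap cache entry) now. */
	free_page_and_swap_cache(page);

	/* Nothing is being batched, so never ask the caller to flush early. */
	return false;
}

The architecture would then also add "select HAVE_MMU_GATHER_NO_GATHER" to its Kconfig entry; only the new Kconfig symbol and the requirement to provide __tlb_remove_page_size() come from the patch itself.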