Mirror of https://github.com/torvalds/linux.git
commit fb7332a9fe
On architectures with hardware broadcasting of TLB invalidation messages, it makes sense to reduce the range of the mmu_gather structure when unmapping page ranges based on the dirty address information passed to tlb_remove_tlb_entry.

arm64 already does this by directly manipulating the start/end fields of the gather structure, but this confuses the generic code which does not expect these fields to change and can end up calculating invalid, negative ranges when forcing a flush in zap_pte_range.

This patch moves the minimal range calculation out of the arm64 code and into the generic implementation, simplifying zap_pte_range in the process (which no longer needs to care about start/end, since they will point to the appropriate ranges already). With the range being tracked by core code, the need_flush flag is dropped in favour of checking that the end of the range has actually been set.

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
Cc: Michal Simek <monstr@monstr.eu>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
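For context, the sketch below illustrates the range tracking described above: each unmapped page widens a [start, end) window in the gather structure, and the flush path tests whether end was ever set instead of consulting a separate need_flush flag. This is a minimal standalone illustration, not the kernel's code; the names mmu_gather_sketch, tlb_adjust_range_sketch, tlb_flush_mmu_sketch, SKETCH_PAGE_SIZE and SKETCH_TASK_SIZE are placeholders.

/* Minimal sketch of per-range TLB gather tracking (not actual kernel code). */
#define SKETCH_PAGE_SIZE 4096UL
#define SKETCH_TASK_SIZE (~0UL)

struct mmu_gather_sketch {
        unsigned long start;    /* lowest address queued for invalidation */
        unsigned long end;      /* one past the highest; 0 means nothing queued */
};

/* Called for every unmapped page (cf. tlb_remove_tlb_entry): widen the window. */
static void tlb_adjust_range_sketch(struct mmu_gather_sketch *tlb,
                                    unsigned long address)
{
        if (address < tlb->start)
                tlb->start = address;
        if (address + SKETCH_PAGE_SIZE > tlb->end)
                tlb->end = address + SKETCH_PAGE_SIZE;
}

/* Flush path: a non-zero end replaces the old need_flush flag. */
static void tlb_flush_mmu_sketch(struct mmu_gather_sketch *tlb)
{
        if (!tlb->end)
                return;                 /* nothing was unmapped, skip the flush */

        /* a real implementation would flush [tlb->start, tlb->end) here */

        tlb->start = SKETCH_TASK_SIZE;  /* reset so the next batch starts empty */
        tlb->end = 0;
}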
arch/powerpc/include/asm/pgalloc.h (26 lines, 585 B, C)
#ifndef _ASM_POWERPC_PGALLOC_H
#define _ASM_POWERPC_PGALLOC_H
#ifdef __KERNEL__

#include <linux/mm.h>

#ifdef CONFIG_PPC_BOOK3E
extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
#else /* CONFIG_PPC_BOOK3E */
static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
                                     unsigned long address)
{
}
#endif /* !CONFIG_PPC_BOOK3E */

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
#include <asm/pgalloc-32.h>
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */