mirror of
https://github.com/torvalds/linux.git
synced 2024-11-12 07:01:57 +00:00
4b3073e1c5
On VIVT ARM, when we have multiple shared mappings of the same file in the same MM, we need to ensure that we have coherency across all copies. We do this via make_coherent() by making the pages uncacheable. This used to work fine, until we allowed highmem with highpte - we now have a page table which is mapped as required, and is not available for modification via update_mmu_cache(). Ralf Baechle suggested getting rid of the PTE value passed to update_mmu_cache(): On MIPS update_mmu_cache() calls __update_tlb() which walks pagetables to construct a pointer to the pte again. Passing a pte_t * is much more elegant. Maybe we might even replace the pte argument with the pte_t? Ben Herrenschmidt would also like the pte pointer for PowerPC: Passing the ptep in there is exactly what I want. I want that -instead- of the PTE value, because I have issue on some ppc cases, for I$/D$ coherency, where set_pte_at() may decide to mask out the _PAGE_EXEC. So, pass in the mapped page table pointer into update_mmu_cache(), and remove the PTE value, updating all implementations and call sites to suit. Includes a fix from Stephen Rothwell: sparc: fix fallout from update_mmu_cache API change Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au> Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
98 lines
2.9 KiB
C
98 lines
2.9 KiB
C
#ifndef _ASM_M32R_TLBFLUSH_H
|
|
#define _ASM_M32R_TLBFLUSH_H
|
|
|
|
#include <asm/m32r.h>
|
|
|
|
/*
|
|
* TLB flushing:
|
|
*
|
|
* - flush_tlb() flushes the current mm struct TLBs
|
|
* - flush_tlb_all() flushes all processes TLBs
|
|
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
|
|
* - flush_tlb_page(vma, vmaddr) flushes one page
|
|
* - flush_tlb_range(vma, start, end) flushes a range of pages
|
|
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
|
|
*/
|
|
|
|
/*
 * Local (current-CPU) TLB flush primitives, implemented in arch code.
 * The flush_tlb_*() macros below map directly onto these in the
 * !CONFIG_SMP configuration.
 */
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *);
extern void local_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void local_flush_tlb_range(struct vm_area_struct *, unsigned long,
	unsigned long);
|
|
|
|
#ifndef CONFIG_SMP
#ifdef CONFIG_MMU
/*
 * Uniprocessor with MMU: every flush only needs to act on the local
 * CPU's TLB, so map the generic API straight onto the local_ helpers.
 * flush_tlb_kernel_range() degrades to a full flush - the hardware
 * interface here has no ranged kernel flush.
 */
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end) \
	local_flush_tlb_range(vma, start, end)
#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()
#else /* CONFIG_MMU */
/*
 * No MMU: there is no TLB, so all flushes are no-ops.
 * NOTE(review): flush_tlb_kernel_range() is not defined in this branch,
 * unlike the two branches above/below - confirm nommu builds never
 * reference it.
 */
#define flush_tlb_all() do { } while (0)
#define flush_tlb_mm(mm) do { } while (0)
#define flush_tlb_page(vma, vmaddr) do { } while (0)
#define flush_tlb_range(vma, start, end) do { } while (0)
#endif /* CONFIG_MMU */
#else /* CONFIG_SMP */
/*
 * SMP: flushes must be broadcast to every CPU; the smp_ variants
 * (implemented in arch SMP code) handle the cross-call.
 */
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *);
extern void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
	unsigned long);

#define flush_tlb_all() smp_flush_tlb_all()
#define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page) smp_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end) \
	smp_flush_tlb_range(vma, start, end)
/* As in the UP case: no ranged kernel flush, fall back to flush-all. */
#define flush_tlb_kernel_range(start, end) smp_flush_tlb_all()
#endif /* CONFIG_SMP */
|
|
|
|
/*
 * Invalidate the TLB entries translating the virtual address 'page'
 * on the local CPU, by driving the m32r MMU registers directly.
 *
 * Register protocol (as this code uses it; offsets come from
 * <asm/m32r.h> - confirm details against the M32R hardware manual):
 *   - write the target VA to MSVA (search virtual address),
 *   - write 1 to MTOP to start the TLB search,
 *   - poll MTOP until the hardware clears it,
 *   - the registers at MIDXI then presumably hold the addresses of the
 *     matched instruction/data TLB entries; store 0 into each to
 *     invalidate them (tmpreg2 is guaranteed 0 here by the bnez loop
 *     having just fallen through).
 */
static __inline__ void __flush_tlb_page(unsigned long page)
{
	unsigned int tmpreg0, tmpreg1, tmpreg2;

	__asm__ __volatile__ (
		"seth %0, #high(%4) \n\t"	/* %0 = high half of MMU_REG_BASE */
		"st %3, @(%5, %0) \n\t"	/* MSVA = page */
		"ldi %1, #1 \n\t"
		"st %1, @(%6, %0) \n\t"	/* MTOP = 1: start the search */
		"add3 %1, %0, %7 \n\t"	/* %1 = address of MIDXI */
		".fillinsn \n"
		"1: \n\t"
		"ld %2, @(%6, %0) \n\t"	/* spin until MTOP reads back 0 */
		"bnez %2, 1b \n\t"
		"ld %0, @%1+ \n\t"	/* %0 = matched entry addr (MIDXI) */
		"ld %1, @%1 \n\t"	/* %1 = matched entry addr (next reg) */
		"st %2, @+%0 \n\t"	/* %2 == 0: clear both entries */
		"st %2, @+%1 \n\t"
		: "=&r" (tmpreg0), "=&r" (tmpreg1), "=&r" (tmpreg2)
		: "r" (page), "i" (MMU_REG_BASE), "i" (MSVA_offset),
		"i" (MTOP_offset), "i" (MIDXI_offset)
		: "memory"
	);
}
|
|
|
|
/*
 * Invalidate every TLB entry on the local CPU.
 *
 * Writes the command value 0xc to the MTOP MMU register (presumably
 * the "flush all" command bits - confirm against the M32R hardware
 * manual) and then busy-waits until the register reads back 0,
 * i.e. the hardware has finished the operation.
 */
static __inline__ void __flush_tlb_all(void)
{
	unsigned int tmpreg0, tmpreg1;

	__asm__ __volatile__ (
		"seth %0, #high(%2) \n\t"	/* %0 = address of MTOP ... */
		"or3 %0, %0, #low(%2) \n\t"	/* ... built in two halves */
		"ldi %1, #0xc \n\t"
		"st %1, @%0 \n\t"	/* kick off the full flush */
		".fillinsn \n"
		"1: \n\t"
		"ld %1, @%0 \n\t"	/* spin until hardware clears MTOP */
		"bnez %1, 1b \n\t"
		: "=&r" (tmpreg0), "=&r" (tmpreg1)
		: "i" (MTOP) : "memory"
	);
}
|
|
|
|
/*
 * Arch hook called by generic MM code after a PTE has been set, so the
 * architecture can preload/refresh its TLB.  Takes a pointer to the
 * mapped page-table entry (pte_t *) rather than the PTE value.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
|
|
|
|
#endif /* _ASM_M32R_TLBFLUSH_H */
|