linux/arch/powerpc/include/asm/book3s/64/tlbflush.h
commit fd193f85d3 ("powerpc/64s: Remove spurious fault flushing for NMMU")
Author: Nicholas Piggin
Commit 6d8278c414 ("powerpc/64s/radix: do not flush TLB on spurious
fault") removed the TLB flush for spurious faults, except when a
coprocessor (nest MMU) maps the address space. That remaining flush is
not needed either, because the NMMU workaround in the PTE permission
upgrade paths prevents PTEs from existing with less restrictive access
permissions than their corresponding TLB entries.

Remove it and replace it with a comment.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220525022358.780745-4-npiggin@gmail.com
2022-07-27 21:36:04 +10:00
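
The workaround the message refers to is a flush-before-upgrade ordering in the radix PTE permission upgrade path. Below is a simplified sketch modelled on radix__ptep_set_access_flags() in arch/powerpc/mm/book3s64/radix_pgtable.c; the real function gates the slow path on additional conditions (CPU feature checks, which permission bits changed), so treat this as an illustration of the ordering rather than the exact kernel code.

/*
 * Simplified sketch of the NMMU workaround, modelled on
 * radix__ptep_set_access_flags() in arch/powerpc/mm/book3s64/radix_pgtable.c.
 * The real code gates the slow path on more conditions; only the
 * flush-before-upgrade ordering is shown here.
 */
void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
                                  pte_t entry, unsigned long address, int psize)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
                                              _PAGE_RW | _PAGE_EXEC);

        if (atomic_read(&mm->context.copros) > 0) {
                unsigned long old_pte, new_pte;

                /*
                 * The POWER9 nest MMU does not re-fetch the PTE after an
                 * access-authority mismatch, so the PTE is invalidated and
                 * the TLB flushed before the upgraded permissions become
                 * visible. No TLB entry with stale, more-restrictive
                 * permissions can then outlive its PTE, which is what makes
                 * a flush in flush_tlb_fix_spurious_fault() unnecessary.
                 */
                old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
                new_pte = old_pte | set;
                radix__flush_tlb_page_psize(mm, address, psize);
                __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
        } else {
                /* The core MMU re-fetches the PTE; a plain update suffices. */
                __radix_pte_update(ptep, 0, set);
        }
}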


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT        ~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
        TLB_INVAL_SCOPE_GLOBAL = 0,     /* invalidate all TLBs */
        TLB_INVAL_SCOPE_LPID = 1,       /* invalidate TLBs for current LPID */
};

static inline void tlbiel_all(void)
{
        /*
         * This is used for host machine check and bootup.
         *
         * This uses early_radix_enabled and implementations use
         * early_cpu_has_feature etc because that works early in boot
         * and this is the machine check path which is not performance
         * critical.
         */
        if (early_radix_enabled())
                radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
        else
                hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}

static inline void tlbiel_all_lpid(bool radix)
{
        /*
         * This is used for guest machine check.
         */
        if (radix)
                radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
        else
                hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}

#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
                                       unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__flush_pmd_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
                                           unsigned long start,
                                           unsigned long end)
{
        if (radix_enabled())
                return radix__flush_hugetlb_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__flush_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        if (radix_enabled())
                return radix__flush_tlb_kernel_range(start, end);
        return hash__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__local_flush_tlb_mm(mm);
        return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__local_flush_tlb_page(vma, vmaddr);
        return hash__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_all_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__local_flush_all_mm(mm);
        return hash__local_flush_all_mm(mm);
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (radix_enabled())
                return radix__tlb_flush(tlb);
        return hash__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__flush_tlb_mm(mm);
        return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__flush_tlb_page(vma, vmaddr);
        return hash__flush_tlb_page(vma, vmaddr);
}

static inline void flush_all_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__flush_all_mm(mm);
        return hash__flush_all_mm(mm);
}
#else
#define flush_tlb_mm(mm)          local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm)          local_flush_all_mm(mm)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
                                                unsigned long address)
{
        /*
         * Book3S 64 does not require spurious fault flushes because the PTE
         * must be re-fetched in case of an access permission problem. So the
         * only reason for a spurious fault should be concurrent modification
         * to the PTE, in which case the PTE will eventually be re-fetched by
         * the MMU when it attempts the access again.
         *
         * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
         * Entry, Setting a Reference or Change Bit or Upgrading Access
         * Authority (PTE Subject to Atomic Hardware Updates):
         *
         * "If the only change being made to a valid PTE that is subject to
         *  atomic hardware updates is to set the Reference or Change bit to
         *  1 or to upgrade access authority, a simpler sequence suffices
         *  because the translation hardware will refetch the PTE if an
         *  access is attempted for which the only problems were reference
         *  and/or change bits needing to be set or insufficient access
         *  authority."
         *
         * The nest MMU in POWER9 does not perform this PTE re-fetch, but
         * it avoids the spurious fault problem by flushing the TLB before
         * upgrading PTE permissions, see radix__ptep_set_access_flags.
         */
}

extern bool tlbie_capable;
extern bool tlbie_enabled;

static inline bool cputlb_use_tlbie(void)
{
        return tlbie_enabled;
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
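
For context on how the empty flush_tlb_fix_spurious_fault() above is reached: its only caller is the generic fault path. The fragment below paraphrases the relevant branch of handle_pte_fault() in mm/memory.c from the same kernel era; surrounding code is elided and details may differ.

        /* Paraphrase of handle_pte_fault(), mm/memory.c (not verbatim). */
        entry = pte_mkyoung(entry);
        if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
                                  vmf->flags & FAULT_FLAG_WRITE)) {
                update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
        } else {
                /*
                 * The PTE already allowed the access, so the fault was
                 * spurious. On Book3S 64 the hook defined above does
                 * nothing, for the reasons given in its comment.
                 */
                if (vmf->flags & FAULT_FLAG_WRITE)
                        flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
        }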