A few MIPS fixes for 4.19:

 - Fix microMIPS build failures by adding a .insn directive to the
   barrier_before_unreachable() asm statement in order to convince the
   toolchain that the asm statement is a valid branch target rather
   than a bogus attempt to switch ISA.

 - Clean up our declarations of TLB functions that we overwrite with
   generated code in order to prevent the compiler making assumptions
   about alignment that cause microMIPS kernels built with GCC 7 &
   above to die early during boot.

 - Fix up a regression for MIPS32 kernels which slipped into the main
   MIPS pull for 4.19, causing CONFIG_32BIT=y kernels to contain
   inappropriate MIPS64 instructions.

 - Extend our existing workaround for MIPSr6 builds that end up using
   the __multi3 intrinsic to GCC 7 & below, rather than just GCC 7.
 -----BEGIN PGP SIGNATURE-----
 
 iIsEABYIADMWIQRgLjeFAZEXQzy86/s+p5+stXUA3QUCW37wVhUccGF1bC5idXJ0
 b25AbWlwcy5jb20ACgkQPqefrLV1AN18iAD/ZO02rgkTgMG7NvZMtbOwflxe1aVz
 YpAQzcOSz+CBxgUA/30ZwZm37hgMi3YWOJMSfmbuWKsYi+/vkcjwlfai7UUF
 =oJFy
 -----END PGP SIGNATURE-----

Merge tag 'mips_4.19_2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS fixes from Paul Burton:

  - Fix microMIPS build failures by adding a .insn directive to the
    barrier_before_unreachable() asm statement in order to convince the
    toolchain that the asm statement is a valid branch target rather
    than a bogus attempt to switch ISA.

  - Clean up our declarations of TLB functions that we overwrite with
    generated code in order to prevent the compiler making assumptions
    about alignment that cause microMIPS kernels built with GCC 7 &
    above to die early during boot.

  - Fix up a regression for MIPS32 kernels which slipped into the main
    MIPS pull for 4.19, causing CONFIG_32BIT=y kernels to contain
    inappropriate MIPS64 instructions.

  - Extend our existing workaround for MIPSr6 builds that end up using
    the __multi3 intrinsic to GCC 7 & below, rather than just GCC 7.

* tag 'mips_4.19_2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
  MIPS: lib: Provide MIPS64r6 __multi3() for GCC < 7
  MIPS: Workaround GCC __builtin_unreachable reordering bug
  compiler.h: Allow arch-specific asm/compiler.h
  MIPS: Avoid move psuedo-instruction whilst using MIPS_ISA_LEVEL
  MIPS: Consistently declare TLB functions
  MIPS: Export tlbmiss_handler_setup_pgd near its definition
This commit is contained in:
Linus Torvalds 2018-08-23 14:23:08 -07:00
commit 0c4b0f815f
12 changed files with 116 additions and 69 deletions

View File

@@ -841,6 +841,14 @@ config REFCOUNT_FULL
 	  against various use-after-free conditions that can be used in
 	  security flaw exploits.
 
+config HAVE_ARCH_COMPILER_H
+	bool
+	help
+	  An architecture can select this if it provides an
+	  asm/compiler.h header that should be included after
+	  linux/compiler-*.h in order to override macro definitions that those
+	  headers generally provide.
+
 config HAVE_ARCH_PREL32_RELOCATIONS
 	bool
 	help

View File

@@ -33,6 +33,7 @@ config MIPS
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select HANDLE_DOMAIN_IRQ
+	select HAVE_ARCH_COMPILER_H
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS if MMU

View File

@@ -5,3 +5,4 @@
 #include <asm-generic/asm-prototypes.h>
 #include <linux/uaccess.h>
 #include <asm/ftrace.h>
+#include <asm/mmu_context.h>

View File

@@ -122,8 +122,8 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)	\
 	"	" #asm_op " %0, %1, %3		\n"			\
 	"	sc	%0, %2			\n"			\
 	"\t" __scbeqz "	%0, 1b			\n"			\
-	"	move	%0, %1			\n"			\
 	"	.set	mips0			\n"			\
+	"	move	%0, %1			\n"			\
 	: "=&r" (result), "=&r" (temp),					\
 	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
 	: "Ir" (i));							\
@@ -190,9 +190,11 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		__asm__ __volatile__(
 		"	.set	"MIPS_ISA_LEVEL"		\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
+		"	.set	mips0				\n"
 		"	subu	%0, %1, %3			\n"
 		"	move	%1, %0				\n"
 		"	bltz	%0, 1f				\n"
+		"	.set	"MIPS_ISA_LEVEL"		\n"
 		"	sc	%1, %2				\n"
 		"\t" __scbeqz "	%1, 1b				\n"
 		"1:						\n"

View File

@@ -8,6 +8,41 @@
 #ifndef _ASM_COMPILER_H
 #define _ASM_COMPILER_H
 
+/*
+ * With GCC 4.5 onwards we can use __builtin_unreachable to indicate to the
+ * compiler that a particular code path will never be hit. This allows it to be
+ * optimised out of the generated binary.
+ *
+ * Unfortunately at least GCC 4.6.3 through 7.3.0 inclusive suffer from a bug
+ * that can lead to instructions from beyond an unreachable statement being
+ * incorrectly reordered into earlier delay slots if the unreachable statement
+ * is the only content of a case in a switch statement. This can lead to
+ * seemingly random behaviour, such as invalid memory accesses from incorrectly
+ * reordered loads or stores. See this potential GCC fix for details:
+ *
+ *   https://gcc.gnu.org/ml/gcc-patches/2015-09/msg00360.html
+ *
+ * It is unclear whether GCC 8 onwards suffer from the same issue - nothing
+ * relevant is mentioned in GCC 8 release notes and nothing obviously relevant
+ * stands out in GCC commit logs, but these newer GCC versions generate very
+ * different code for the testcase which doesn't exhibit the bug.
+ *
+ * GCC also handles stack allocation suboptimally when calling noreturn
+ * functions or calling __builtin_unreachable():
+ *
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * We work around both of these issues by placing a volatile asm statement,
+ * which GCC is prevented from reordering past, prior to __builtin_unreachable
+ * calls.
+ *
+ * The .insn statement is required to ensure that any branches to the
+ * statement, which sadly must be kept due to the asm statement, are known to
+ * be branches to code and satisfy linker requirements for microMIPS kernels.
+ */
+#undef barrier_before_unreachable
+#define barrier_before_unreachable() asm volatile(".insn")
+
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
 #define GCC_IMM_ASM() "n"
 #define GCC_REG_ACCUM "$0"

View File

@@ -32,6 +32,7 @@ do {									\
 } while (0)
 
 extern void tlbmiss_handler_setup_pgd(unsigned long);
+extern char tlbmiss_handler_setup_pgd_end[];
 
 /* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */
 #define TLBMISS_HANDLER_SETUP_PGD(pgd)					\

View File

@@ -24,4 +24,13 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 			   struct uasm_reloc **r,
 			   enum tlb_write_entry wmode);
 
+extern void handle_tlbl(void);
+extern char handle_tlbl_end[];
+extern void handle_tlbs(void);
+extern char handle_tlbs_end[];
+extern void handle_tlbm(void);
+extern char handle_tlbm_end[];
+
 #endif /* __ASM_TLBEX_H */

View File

@@ -67,14 +67,12 @@
 #include <asm/mmu_context.h>
 #include <asm/types.h>
 #include <asm/stacktrace.h>
+#include <asm/tlbex.h>
 #include <asm/uasm.h>
 
 extern void check_wait(void);
 extern asmlinkage void rollback_handle_int(void);
 extern asmlinkage void handle_int(void);
-extern u32 handle_tlbl[];
-extern u32 handle_tlbs[];
-extern u32 handle_tlbm[];
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
 extern asmlinkage void handle_ibe(void);

View File

@@ -4,12 +4,12 @@
 #include "libgcc.h"
 
 /*
- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
- * specific case only we'll implement it here.
+ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
+ * that specific case only we implement that intrinsic here.
  *
  * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
  */
-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
 
 /* multiply 64-bit values, low 64-bits returned */
 static inline long long notrace dmulu(long long a, long long b)

View File

@@ -12,16 +12,17 @@
  * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
  */
 #include <asm/asm.h>
+#include <asm/export.h>
 #include <asm/regdef.h>
 
 #define FASTPATH_SIZE	128
 
-EXPORT(tlbmiss_handler_setup_pgd_start)
 LEAF(tlbmiss_handler_setup_pgd)
 1:	j	1b		/* Dummy, will be replaced. */
 	.space	64
 END(tlbmiss_handler_setup_pgd)
 EXPORT(tlbmiss_handler_setup_pgd_end)
+EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd)
 
 LEAF(handle_tlbm)
 	.space		FASTPATH_SIZE * 4

View File

@ -31,6 +31,7 @@
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/cpu-type.h> #include <asm/cpu-type.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/war.h> #include <asm/war.h>
#include <asm/uasm.h> #include <asm/uasm.h>
@ -253,8 +254,10 @@ static void output_pgtable_bits_defines(void)
pr_debug("\n"); pr_debug("\n");
} }
static inline void dump_handler(const char *symbol, const u32 *handler, int count) static inline void dump_handler(const char *symbol, const void *start, const void *end)
{ {
unsigned int count = (end - start) / sizeof(u32);
const u32 *handler = start;
int i; int i;
pr_debug("LEAF(%s)\n", symbol); pr_debug("LEAF(%s)\n", symbol);
@ -402,12 +405,6 @@ static void build_restore_work_registers(u32 **p)
* CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
* we cannot do r3000 under these circumstances. * we cannot do r3000 under these circumstances.
* *
* Declare pgd_current here instead of including mmu_context.h to avoid type
* conflicts for tlbmiss_handler_setup_pgd
*/
extern unsigned long pgd_current[];
/*
* The R3000 TLB handler is simple. * The R3000 TLB handler is simple.
*/ */
static void build_r3000_tlb_refill_handler(void) static void build_r3000_tlb_refill_handler(void)
@ -444,8 +441,7 @@ static void build_r3000_tlb_refill_handler(void)
memcpy((void *)ebase, tlb_handler, 0x80); memcpy((void *)ebase, tlb_handler, 0x80);
local_flush_icache_range(ebase, ebase + 0x80); local_flush_icache_range(ebase, ebase + 0x80);
dump_handler("r3000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x80));
dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
} }
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
@ -1465,8 +1461,7 @@ static void build_r4000_tlb_refill_handler(void)
memcpy((void *)ebase, final_handler, 0x100); memcpy((void *)ebase, final_handler, 0x100);
local_flush_icache_range(ebase, ebase + 0x100); local_flush_icache_range(ebase, ebase + 0x100);
dump_handler("r4000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x100));
dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
} }
static void setup_pw(void) static void setup_pw(void)
@ -1568,31 +1563,21 @@ static void build_loongson3_tlb_refill_handler(void)
uasm_resolve_relocs(relocs, labels); uasm_resolve_relocs(relocs, labels);
memcpy((void *)(ebase + 0x80), tlb_handler, 0x80); memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
local_flush_icache_range(ebase + 0x80, ebase + 0x100); local_flush_icache_range(ebase + 0x80, ebase + 0x100);
dump_handler("loongson3_tlb_refill", (u32 *)(ebase + 0x80), 32); dump_handler("loongson3_tlb_refill",
(u32 *)(ebase + 0x80), (u32 *)(ebase + 0x100));
} }
extern u32 handle_tlbl[], handle_tlbl_end[];
extern u32 handle_tlbs[], handle_tlbs_end[];
extern u32 handle_tlbm[], handle_tlbm_end[];
extern u32 tlbmiss_handler_setup_pgd_start[];
extern u32 tlbmiss_handler_setup_pgd[];
EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd);
extern u32 tlbmiss_handler_setup_pgd_end[];
static void build_setup_pgd(void) static void build_setup_pgd(void)
{ {
const int a0 = 4; const int a0 = 4;
const int __maybe_unused a1 = 5; const int __maybe_unused a1 = 5;
const int __maybe_unused a2 = 6; const int __maybe_unused a2 = 6;
u32 *p = tlbmiss_handler_setup_pgd_start; u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd);
const int tlbmiss_handler_setup_pgd_size =
tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
long pgdc = (long)pgd_current; long pgdc = (long)pgd_current;
#endif #endif
memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size * memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p);
sizeof(tlbmiss_handler_setup_pgd[0]));
memset(labels, 0, sizeof(labels)); memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs)); memset(relocs, 0, sizeof(relocs));
pgd_reg = allocate_kscratch(); pgd_reg = allocate_kscratch();
@ -1645,15 +1630,15 @@ static void build_setup_pgd(void)
else else
uasm_i_nop(&p); uasm_i_nop(&p);
#endif #endif
if (p >= tlbmiss_handler_setup_pgd_end) if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
panic("tlbmiss_handler_setup_pgd space exceeded"); panic("tlbmiss_handler_setup_pgd space exceeded");
uasm_resolve_relocs(relocs, labels); uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
(unsigned int)(p - tlbmiss_handler_setup_pgd)); (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd));
dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd, dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
tlbmiss_handler_setup_pgd_size); tlbmiss_handler_setup_pgd_end);
} }
static void static void
@ -1922,12 +1907,11 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
static void build_r3000_tlb_load_handler(void) static void build_r3000_tlb_load_handler(void)
{ {
u32 *p = handle_tlbl; u32 *p = (u32 *)handle_tlbl;
const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
struct uasm_label *l = labels; struct uasm_label *l = labels;
struct uasm_reloc *r = relocs; struct uasm_reloc *r = relocs;
memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0])); memset(p, 0, handle_tlbl_end - (char *)p);
memset(labels, 0, sizeof(labels)); memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs)); memset(relocs, 0, sizeof(relocs));
@ -1941,24 +1925,23 @@ static void build_r3000_tlb_load_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p); uasm_i_nop(&p);
if (p >= handle_tlbl_end) if (p >= (u32 *)handle_tlbl_end)
panic("TLB load handler fastpath space exceeded"); panic("TLB load handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels); uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbl)); (unsigned int)(p - (u32 *)handle_tlbl));
dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size); dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end);
} }
static void build_r3000_tlb_store_handler(void) static void build_r3000_tlb_store_handler(void)
{ {
u32 *p = handle_tlbs; u32 *p = (u32 *)handle_tlbs;
const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
struct uasm_label *l = labels; struct uasm_label *l = labels;
struct uasm_reloc *r = relocs; struct uasm_reloc *r = relocs;
memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0])); memset(p, 0, handle_tlbs_end - (char *)p);
memset(labels, 0, sizeof(labels)); memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs)); memset(relocs, 0, sizeof(relocs));
@ -1972,24 +1955,23 @@ static void build_r3000_tlb_store_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p); uasm_i_nop(&p);
if (p >= handle_tlbs_end) if (p >= (u32 *)handle_tlbs_end)
panic("TLB store handler fastpath space exceeded"); panic("TLB store handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels); uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbs)); (unsigned int)(p - (u32 *)handle_tlbs));
dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size); dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_end);
} }
static void build_r3000_tlb_modify_handler(void) static void build_r3000_tlb_modify_handler(void)
{ {
u32 *p = handle_tlbm; u32 *p = (u32 *)handle_tlbm;
const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
struct uasm_label *l = labels; struct uasm_label *l = labels;
struct uasm_reloc *r = relocs; struct uasm_reloc *r = relocs;
memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0])); memset(p, 0, handle_tlbm_end - (char *)p);
memset(labels, 0, sizeof(labels)); memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs)); memset(relocs, 0, sizeof(relocs));
@ -2003,14 +1985,14 @@ static void build_r3000_tlb_modify_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p); uasm_i_nop(&p);
if (p >= handle_tlbm_end) if (p >= (u32 *)handle_tlbm_end)
panic("TLB modify handler fastpath space exceeded"); panic("TLB modify handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels); uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbm)); (unsigned int)(p - (u32 *)handle_tlbm));
dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size); dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_end);
} }
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
@ -2102,12 +2084,11 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
static void build_r4000_tlb_load_handler(void) static void build_r4000_tlb_load_handler(void)
{ {
u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl); u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
struct uasm_label *l = labels; struct uasm_label *l = labels;
struct uasm_reloc *r = relocs; struct uasm_reloc *r = relocs;
struct work_registers wr; struct work_registers wr;
memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0])); memset(p, 0, handle_tlbl_end - (char *)p);
memset(labels, 0, sizeof(labels)); memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs)); memset(relocs, 0, sizeof(relocs));
@ -2288,25 +2269,24 @@ static void build_r4000_tlb_load_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p); uasm_i_nop(&p);
if (p >= handle_tlbl_end) if (p >= (u32 *)handle_tlbl_end)
panic("TLB load handler fastpath space exceeded"); panic("TLB load handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels); uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbl)); (unsigned int)(p - (u32 *)handle_tlbl));
dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size); dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_end);
} }
static void build_r4000_tlb_store_handler(void) static void build_r4000_tlb_store_handler(void)
{ {
u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs); u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
struct uasm_label *l = labels; struct uasm_label *l = labels;
struct uasm_reloc *r = relocs; struct uasm_reloc *r = relocs;
struct work_registers wr; struct work_registers wr;
memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0])); memset(p, 0, handle_tlbs_end - (char *)p);
memset(labels, 0, sizeof(labels)); memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs)); memset(relocs, 0, sizeof(relocs));
@ -2343,25 +2323,24 @@ static void build_r4000_tlb_store_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p); uasm_i_nop(&p);
if (p >= handle_tlbs_end) if (p >= (u32 *)handle_tlbs_end)
panic("TLB store handler fastpath space exceeded"); panic("TLB store handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels); uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbs)); (unsigned int)(p - (u32 *)handle_tlbs));
dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size); dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_end);
} }
static void build_r4000_tlb_modify_handler(void) static void build_r4000_tlb_modify_handler(void)
{ {
u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm); u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
struct uasm_label *l = labels; struct uasm_label *l = labels;
struct uasm_reloc *r = relocs; struct uasm_reloc *r = relocs;
struct work_registers wr; struct work_registers wr;
memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0])); memset(p, 0, handle_tlbm_end - (char *)p);
memset(labels, 0, sizeof(labels)); memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs)); memset(relocs, 0, sizeof(relocs));
@ -2399,14 +2378,14 @@ static void build_r4000_tlb_modify_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p); uasm_i_nop(&p);
if (p >= handle_tlbm_end) if (p >= (u32 *)handle_tlbm_end)
panic("TLB modify handler fastpath space exceeded"); panic("TLB modify handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels); uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbm)); (unsigned int)(p - (u32 *)handle_tlbm));
dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size); dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_end);
} }
static void flush_tlb_handlers(void) static void flush_tlb_handlers(void)

View File

@@ -66,6 +66,18 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 #error "Unknown compiler"
 #endif
 
+/*
+ * Some architectures need to provide custom definitions of macros provided
+ * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
+ * conditionally rather than using an asm-generic wrapper in order to avoid
+ * build failures if any C compilation, which will include this file via an
+ * -include argument in c_flags, occurs prior to the asm-generic wrappers being
+ * generated.
+ */
+#ifdef CONFIG_HAVE_ARCH_COMPILER_H
+#include <asm/compiler.h>
+#endif
+
 /*
  * Generic compiler-independent macros required for kernel
  * build go below this comment. Actual compiler/compiler version