LoongArch fixes for v5.19-final
-----BEGIN PGP SIGNATURE-----

iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmLj6XUWHGNoZW5odWFj
YWlAa2VybmVsLm9yZwAKCRAChivD8uImeuFOEACcuQB5cLihx6Oao5MZmPlaUx3h
IexoIjRKsJywH8MJHePsvfMH3E6cpTyce+uJLyj/BCWDOewlKZbUc4a0kxHUSdEo
OohGFJLMb4tWUMFyELdOgjiwDuCPgVfZljsS75JvtTAuz+mnDAV4QjsCXxrVb2Qg
K9yUyzUd8Pcl5c5InR6RzhQH9Sq2U93+MVKDgDuuUkJHP81bgV+pUcmDYf3Vi+7y
zmKSqz5JgL/mAQ04PT1l56fgvRreUrKIZkI+yVcfU8x3ciHVdfrN8RzSxwapNLJD
5DYnXCquFC3fksOLb8kqXKY+lWP6a2VrWoM5rVzmYizuN5Y+q+zrAVEqM2KSk0+/
VkylhottrpONnIO7xXjc9JQvZZFZxDJ4PJbh6xWjT0QYgnU5r0DMjZTKksdv0Rgm
Z4MHHqHMJRvgBrGEgWfZoP4ijgwIoMu+FCI/xZ8r5EvI4xCp7wWs/3DUFF2KzCYJ
wzactvZcf0DFeczoyfsCF6dcLr3PqL8RB2O+Hc/mbWomSypDjDylZuc5u4nplLdy
RzQkUvlLAgWTuRqsUkKDFh3rwdjgwDAdIViIk7Iy8eto7d211Zq62GwoA9BdHlUh
C2H21hyqoGRsf/40ZmzMpZJDPxbGg9i5mxa/iiy/vas56emuOgUfQSnu+8poHRrk
WHf7ZgtQrz6AzMyZ+w==
=rqgi
-----END PGP SIGNATURE-----

Merge tag 'loongarch-fixes-5.19-5' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:

 - Fix cache size calculation, stack protection attributes, ptrace's
   fpr_set and "ROM Size" in boardinfo

 - Some cleanups and improvements of assembly

 - Some cleanups of unused code and useless code

* tag 'loongarch-fixes-5.19-5' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: Fix wrong "ROM Size" of boardinfo
  LoongArch: Fix missing fcsr in ptrace's fpr_set
  LoongArch: Fix shared cache size calculation
  LoongArch: Disable executable stack by default
  LoongArch: Remove unused variables
  LoongArch: Remove clock setting during cpu hotplug stage
  LoongArch: Remove useless header compiler.h
  LoongArch: Remove several syntactic sugar macros for branches
  LoongArch: Re-tab the assembly files
  LoongArch: Simplify "BGT foo, zero" with BGTZ
  LoongArch: Simplify "BLT foo, zero" with BLTZ
  LoongArch: Simplify "BEQ/BNE foo, zero" with BEQZ/BNEZ
  LoongArch: Use the "move" pseudo-instruction where applicable
  LoongArch: Use the "jr" pseudo-instruction where applicable
  LoongArch: Use ABI names of registers where appropriate
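Illustrative note (not part of the pulled patches): the pseudo-instruction cleanups in the shortlog are pure assembler aliases, so each rewrite encodes the identical machine word. A hedged sketch with hypothetical registers and labels:

    or      a0, a1, zero    ->   move   a0, a1     # register copy
    beq     t0, zero, 1f    ->   beqz   t0, 1f     # branch if zero
    bne     t0, zero, 1f    ->   bnez   t0, 1f     # branch if not zero
    blt     t0, zero, 2f    ->   bltz   t0, 2f     # branch if negative
    blt     zero, t1, 3f    ->   bgtz   t1, 3f     # branch if positive
    jirl    zero, ra, 0     ->   jr     ra         # jump, link discarded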
commit a95eb1d086
@@ -69,7 +69,6 @@ config LOONGARCH
 	select GENERIC_TIME_VSYSCALL
 	select GPIOLIB
 	select HAVE_ARCH_AUDITSYSCALL
-	select HAVE_ARCH_COMPILER_H
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
@@ -274,16 +274,4 @@
 	nor	\dst, \src, zero
 	.endm

-	.macro	bgt r0 r1 label
-	blt	\r1, \r0, \label
-	.endm
-
-	.macro	bltz r0 label
-	blt	\r0, zero, \label
-	.endm
-
-	.macro	bgez r0 label
-	bge	\r0, zero, \label
-	.endm
-
 #endif /* _ASM_ASMMACRO_H */
@@ -10,7 +10,6 @@
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
-#include <asm/compiler.h>

 #if __SIZEOF_LONG__ == 4
 #define __LL	"ll.w	"
@@ -157,27 +156,25 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
 		__asm__ __volatile__(
 		"1:	ll.w	%1, %2	# atomic_sub_if_positive\n"
 		"	addi.w	%0, %1, %3			\n"
-		"	or	%1, %0, $zero			\n"
-		"	blt	%0, $zero, 2f			\n"
+		"	move	%1, %0				\n"
+		"	bltz	%0, 2f				\n"
 		"	sc.w	%1, %2				\n"
-		"	beq	$zero, %1, 1b			\n"
+		"	beqz	%1, 1b				\n"
 		"2:						\n"
 		__WEAK_LLSC_MB
-		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
 		: "I" (-i));
 	} else {
 		__asm__ __volatile__(
 		"1:	ll.w	%1, %2	# atomic_sub_if_positive\n"
 		"	sub.w	%0, %1, %3			\n"
-		"	or	%1, %0, $zero			\n"
-		"	blt	%0, $zero, 2f			\n"
+		"	move	%1, %0				\n"
+		"	bltz	%0, 2f				\n"
 		"	sc.w	%1, %2				\n"
-		"	beq	$zero, %1, 1b			\n"
+		"	beqz	%1, 1b				\n"
 		"2:						\n"
 		__WEAK_LLSC_MB
-		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
 		: "r" (i));
 	}

@@ -320,27 +317,25 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
 		__asm__ __volatile__(
 		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
 		"	addi.d	%0, %1, %3			\n"
-		"	or	%1, %0, $zero			\n"
-		"	blt	%0, $zero, 2f			\n"
+		"	move	%1, %0				\n"
+		"	bltz	%0, 2f				\n"
 		"	sc.d	%1, %2				\n"
-		"	beq	%1, $zero, 1b			\n"
+		"	beqz	%1, 1b				\n"
 		"2:						\n"
 		__WEAK_LLSC_MB
-		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
 		: "I" (-i));
 	} else {
 		__asm__ __volatile__(
 		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
 		"	sub.d	%0, %1, %3			\n"
-		"	or	%1, %0, $zero			\n"
-		"	blt	%0, $zero, 2f			\n"
+		"	move	%1, %0				\n"
+		"	bltz	%0, 2f				\n"
 		"	sc.d	%1, %2				\n"
-		"	beq	%1, $zero, 1b			\n"
+		"	beqz	%1, 1b				\n"
 		"2:						\n"
 		__WEAK_LLSC_MB
-		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
 		: "r" (i));
 	}

@@ -48,9 +48,9 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 	__asm__ __volatile__(
 		"sltu	%0, %1, %2\n\t"
 #if (__SIZEOF_LONG__ == 4)
-		"sub.w	%0, $r0, %0\n\t"
+		"sub.w	%0, $zero, %0\n\t"
 #elif (__SIZEOF_LONG__ == 8)
-		"sub.d	%0, $r0, %0\n\t"
+		"sub.d	%0, $zero, %0\n\t"
 #endif
 		: "=r" (mask)
 		: "r" (index), "r" (size)
@@ -55,9 +55,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
 	__asm__ __volatile__(					\
 	"1:	" ld "	%0, %2	# __cmpxchg_asm	\n"		\
 	"	bne	%0, %z3, 2f		\n"		\
-	"	or	$t0, %z4, $zero		\n"		\
+	"	move	$t0, %z4		\n"		\
 	"	" st "	$t0, %1			\n"		\
-	"	beq	$zero, $t0, 1b		\n"		\
+	"	beqz	$t0, 1b			\n"		\
 	"2:					\n"		\
 	__WEAK_LLSC_MB						\
 	: "=&r" (__ret), "=ZB"(*m)				\
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_COMPILER_H
-#define _ASM_COMPILER_H
-
-#define GCC_OFF_SMALL_ASM() "ZC"
-
-#define LOONGARCH_ISA_LEVEL "loongarch"
-#define LOONGARCH_ISA_ARCH_LEVEL "arch=loongarch"
-#define LOONGARCH_ISA_LEVEL_RAW loongarch
-#define LOONGARCH_ISA_ARCH_LEVEL_RAW LOONGARCH_ISA_LEVEL_RAW
-
-#endif /* _ASM_COMPILER_H */
@@ -288,8 +288,6 @@ struct arch_elf_state {
 	.interp_fp_abi = LOONGARCH_ABI_FP_ANY,	\
 }

-#define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
-
 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
 			    bool is_interp, struct arch_elf_state *state);

@@ -8,7 +8,6 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/barrier.h>
-#include <asm/compiler.h>
 #include <asm/errno.h>

 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
@@ -17,7 +16,7 @@
 	"1:	ll.w	%1, %4 # __futex_atomic_op\n"	\
 	"	" insn "			\n"	\
 	"2:	sc.w	$t0, %2			\n"	\
-	"	beq	$t0, $zero, 1b		\n"	\
+	"	beqz	$t0, 1b			\n"	\
 	"3:					\n"	\
 	"	.section .fixup,\"ax\"		\n"	\
 	"4:	li.w	%0, %6			\n"	\
@@ -82,9 +81,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
 	"# futex_atomic_cmpxchg_inatomic		\n"
 	"1:	ll.w	%1, %3				\n"
 	"	bne	%1, %z4, 3f			\n"
-	"	or	$t0, %z5, $zero			\n"
+	"	move	$t0, %z5			\n"
 	"2:	sc.w	$t0, %2				\n"
-	"	beq	$zero, $t0, 1b			\n"
+	"	beqz	$t0, 1b				\n"
 	"3:						\n"
 	__WEAK_LLSC_MB
 	"	.section .fixup,\"ax\"			\n"
@@ -95,8 +94,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
 	"	"__UA_ADDR "\t1b, 4b			\n"
 	"	"__UA_ADDR "\t2b, 4b			\n"
 	"	.previous				\n"
-	: "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
-	: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+	: "+r" (ret), "=&r" (val), "=ZC" (*uaddr)
+	: "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval),
 	  "i" (-EFAULT)
 	: "memory", "t0");

@@ -9,7 +9,6 @@

 #include <linux/compiler.h>
 #include <linux/stringify.h>
-#include <asm/compiler.h>
 #include <asm/loongarch.h>

 static inline void arch_local_irq_enable(void)
@@ -9,7 +9,6 @@
 #include <linux/bitops.h>
 #include <linux/atomic.h>
 #include <asm/cmpxchg.h>
-#include <asm/compiler.h>

 typedef struct {
 	atomic_long_t a;
@@ -39,18 +39,6 @@ extern const struct plat_smp_ops loongson3_smp_ops;

 #define MAX_PACKAGES 16

-/* Chip Config register of each physical cpu package */
-extern u64 loongson_chipcfg[MAX_PACKAGES];
-#define LOONGSON_CHIPCFG(id) (*(volatile u32 *)(loongson_chipcfg[id]))
-
-/* Chip Temperature register of each physical cpu package */
-extern u64 loongson_chiptemp[MAX_PACKAGES];
-#define LOONGSON_CHIPTEMP(id) (*(volatile u32 *)(loongson_chiptemp[id]))
-
-/* Freq Control register of each physical cpu package */
-extern u64 loongson_freqctrl[MAX_PACKAGES];
-#define LOONGSON_FREQCTRL(id) (*(volatile u32 *)(loongson_freqctrl[id]))
-
 #define xconf_readl(addr) readl(addr)
 #define xconf_readq(addr) readq(addr)

@@ -58,7 +46,7 @@ static inline void xconf_writel(u32 val, volatile void __iomem *addr)
 {
 	asm volatile (
 	"	st.w	%[v], %[hw], 0	\n"
-	"	ld.b	$r0, %[hw], 0	\n"
+	"	ld.b	$zero, %[hw], 0	\n"
 	:
 	: [hw] "r" (addr), [v] "r" (val)
 	);
@@ -68,7 +56,7 @@ static inline void xconf_writeq(u64 val64, volatile void __iomem *addr)
 {
 	asm volatile (
 	"	st.d	%[v], %[hw], 0	\n"
-	"	ld.b	$r0, %[hw], 0	\n"
+	"	ld.b	$zero, %[hw], 0	\n"
 	:
 	: [hw] "r" (addr), [v] "r" (val64)
 	);
@@ -23,13 +23,13 @@
 static __always_inline void prepare_frametrace(struct pt_regs *regs)
 {
 	__asm__ __volatile__(
-		/* Save $r1 */
+		/* Save $ra */
 		STORE_ONE_REG(1)
-		/* Use $r1 to save PC */
-		"pcaddi $r1, 0\n\t"
-		STR_LONG_S " $r1, %0\n\t"
-		/* Restore $r1 */
-		STR_LONG_L " $r1, %1, "STR_LONGSIZE"\n\t"
+		/* Use $ra to save PC */
+		"pcaddi $ra, 0\n\t"
+		STR_LONG_S " $ra, %0\n\t"
+		/* Restore $ra */
+		STR_LONG_L " $ra, %1, "STR_LONGSIZE"\n\t"
 		STORE_ONE_REG(2)
 		STORE_ONE_REG(3)
 		STORE_ONE_REG(4)
@@ -44,14 +44,14 @@ struct thread_info {
 }

 /* How to get the thread information struct from C. */
-register struct thread_info *__current_thread_info __asm__("$r2");
+register struct thread_info *__current_thread_info __asm__("$tp");

 static inline struct thread_info *current_thread_info(void)
 {
 	return __current_thread_info;
 }

-register unsigned long current_stack_pointer __asm__("$r3");
+register unsigned long current_stack_pointer __asm__("$sp");

 #endif /* !__ASSEMBLY__ */

@@ -162,7 +162,7 @@ do {								\
 	"2:						\n"	\
 	"	.section .fixup,\"ax\"			\n"	\
 	"3:	li.w	%0, %3				\n"	\
-	"	or	%1, $r0, $r0			\n"	\
+	"	move	%1, $zero			\n"	\
 	"	b	2b				\n"	\
 	"	.previous				\n"	\
 	"	.section __ex_table,\"a\"		\n"	\
@@ -4,8 +4,9 @@
  *
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
-#include <asm/cpu-info.h>
 #include <linux/cacheinfo.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu-info.h>

 /* Populates leaf and increments to next leaf */
 #define populate_cache(cache, leaf, c_level, c_type)		\
@@ -17,6 +18,8 @@ do {								\
 	leaf->ways_of_associativity = c->cache.ways;		\
 	leaf->size = c->cache.linesz * c->cache.sets *		\
 		     c->cache.ways;				\
+	if (leaf->level > 2)					\
+		leaf->size *= nodes_per_package;		\
 	leaf++;							\
 } while (0)

@@ -95,11 +98,15 @@ static void cache_cpumap_setup(unsigned int cpu)

 int populate_cache_leaves(unsigned int cpu)
 {
-	int level = 1;
+	int level = 1, nodes_per_package = 1;
 	struct cpuinfo_loongarch *c = &current_cpu_data;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct cacheinfo *this_leaf = this_cpu_ci->info_list;

+	if (loongson_sysconf.nr_nodes > 1)
+		nodes_per_package = loongson_sysconf.cores_per_package
+					/ loongson_sysconf.cores_per_node;
+
 	if (c->icache.waysize) {
 		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
 		populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
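Illustrative note (not part of the pulled patches): the shared cache size fix above scales cache levels beyond L2 by the number of nodes in a package. A hedged, self-contained C sketch of the corrected arithmetic; the function and parameter names are hypothetical, only the formula mirrors the hunk:

    #include <assert.h>

    /* Caches above level 2 are shared by all nodes in a package, so the
     * per-node size is scaled by nodes_per_package (1 on single-node
     * systems, cores_per_package / cores_per_node otherwise). */
    static unsigned int cache_leaf_size(unsigned int linesz, unsigned int sets,
                                        unsigned int ways, unsigned int level,
                                        unsigned int nodes_per_package)
    {
            unsigned int size = linesz * sets * ways;

            if (level > 2)
                    size *= nodes_per_package;
            return size;
    }

    int main(void)
    {
            /* 64 B lines, 16384 sets, 16 ways, L3 shared by 2 nodes: 32 MB */
            assert(cache_leaf_size(64, 16384, 16, 3, 2) == 32u * 1024 * 1024);
            return 0;
    }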
@@ -27,7 +27,7 @@ SYM_FUNC_START(handle_syscall)

 	addi.d	sp, sp, -PT_SIZE
 	cfi_st	t2, PT_R3
-	cfi_rel_offset  sp, PT_R3
+	cfi_rel_offset	sp, PT_R3
 	st.d	zero, sp, PT_R0
 	csrrd	t2, LOONGARCH_CSR_PRMD
 	st.d	t2, sp, PT_PRMD
@@ -50,7 +50,7 @@ SYM_FUNC_START(handle_syscall)
 	cfi_st	a7, PT_R11
 	csrrd	ra, LOONGARCH_CSR_ERA
 	st.d	ra, sp, PT_ERA
-	cfi_rel_offset  ra, PT_ERA
+	cfi_rel_offset	ra, PT_ERA

 	cfi_st	tp, PT_R2
 	cfi_st	u0, PT_R21
@@ -17,21 +17,6 @@ u64 efi_system_table;
 struct loongson_system_configuration loongson_sysconf;
 EXPORT_SYMBOL(loongson_sysconf);

-u64 loongson_chipcfg[MAX_PACKAGES];
-u64 loongson_chiptemp[MAX_PACKAGES];
-u64 loongson_freqctrl[MAX_PACKAGES];
-unsigned long long smp_group[MAX_PACKAGES];
-
-static void __init register_addrs_set(u64 *registers, const u64 addr, int num)
-{
-	u64 i;
-
-	for (i = 0; i < num; i++) {
-		*registers = (i << 44) | addr;
-		registers++;
-	}
-}
-
 void __init init_environ(void)
 {
 	int efi_boot = fw_arg0;
@@ -50,11 +35,6 @@ void __init init_environ(void)
 	efi_memmap_init_early(&data);
 	memblock_reserve(data.phys_map & PAGE_MASK,
 			 PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
-
-	register_addrs_set(smp_group, TO_UNCACHE(0x1fe01000), 16);
-	register_addrs_set(loongson_chipcfg, TO_UNCACHE(0x1fe00180), 16);
-	register_addrs_set(loongson_chiptemp, TO_UNCACHE(0x1fe0019c), 16);
-	register_addrs_set(loongson_freqctrl, TO_UNCACHE(0x1fe001d0), 16);
 }

 static int __init init_cpu_fullname(void)
@@ -27,78 +27,78 @@
 	.endm

 	.macro sc_save_fp base
-	EX	fst.d $f0, \base, (0 * FPU_REG_WIDTH)
-	EX	fst.d $f1, \base, (1 * FPU_REG_WIDTH)
-	EX	fst.d $f2, \base, (2 * FPU_REG_WIDTH)
-	EX	fst.d $f3, \base, (3 * FPU_REG_WIDTH)
-	EX	fst.d $f4, \base, (4 * FPU_REG_WIDTH)
-	EX	fst.d $f5, \base, (5 * FPU_REG_WIDTH)
-	EX	fst.d $f6, \base, (6 * FPU_REG_WIDTH)
-	EX	fst.d $f7, \base, (7 * FPU_REG_WIDTH)
-	EX	fst.d $f8, \base, (8 * FPU_REG_WIDTH)
-	EX	fst.d $f9, \base, (9 * FPU_REG_WIDTH)
-	EX	fst.d $f10, \base, (10 * FPU_REG_WIDTH)
-	EX	fst.d $f11, \base, (11 * FPU_REG_WIDTH)
-	EX	fst.d $f12, \base, (12 * FPU_REG_WIDTH)
-	EX	fst.d $f13, \base, (13 * FPU_REG_WIDTH)
-	EX	fst.d $f14, \base, (14 * FPU_REG_WIDTH)
-	EX	fst.d $f15, \base, (15 * FPU_REG_WIDTH)
-	EX	fst.d $f16, \base, (16 * FPU_REG_WIDTH)
-	EX	fst.d $f17, \base, (17 * FPU_REG_WIDTH)
-	EX	fst.d $f18, \base, (18 * FPU_REG_WIDTH)
-	EX	fst.d $f19, \base, (19 * FPU_REG_WIDTH)
-	EX	fst.d $f20, \base, (20 * FPU_REG_WIDTH)
-	EX	fst.d $f21, \base, (21 * FPU_REG_WIDTH)
-	EX	fst.d $f22, \base, (22 * FPU_REG_WIDTH)
-	EX	fst.d $f23, \base, (23 * FPU_REG_WIDTH)
-	EX	fst.d $f24, \base, (24 * FPU_REG_WIDTH)
-	EX	fst.d $f25, \base, (25 * FPU_REG_WIDTH)
-	EX	fst.d $f26, \base, (26 * FPU_REG_WIDTH)
-	EX	fst.d $f27, \base, (27 * FPU_REG_WIDTH)
-	EX	fst.d $f28, \base, (28 * FPU_REG_WIDTH)
-	EX	fst.d $f29, \base, (29 * FPU_REG_WIDTH)
-	EX	fst.d $f30, \base, (30 * FPU_REG_WIDTH)
-	EX	fst.d $f31, \base, (31 * FPU_REG_WIDTH)
+	EX	fst.d	$f0, \base, (0 * FPU_REG_WIDTH)
+	EX	fst.d	$f1, \base, (1 * FPU_REG_WIDTH)
+	EX	fst.d	$f2, \base, (2 * FPU_REG_WIDTH)
+	EX	fst.d	$f3, \base, (3 * FPU_REG_WIDTH)
+	EX	fst.d	$f4, \base, (4 * FPU_REG_WIDTH)
+	EX	fst.d	$f5, \base, (5 * FPU_REG_WIDTH)
+	EX	fst.d	$f6, \base, (6 * FPU_REG_WIDTH)
+	EX	fst.d	$f7, \base, (7 * FPU_REG_WIDTH)
+	EX	fst.d	$f8, \base, (8 * FPU_REG_WIDTH)
+	EX	fst.d	$f9, \base, (9 * FPU_REG_WIDTH)
+	EX	fst.d	$f10, \base, (10 * FPU_REG_WIDTH)
+	EX	fst.d	$f11, \base, (11 * FPU_REG_WIDTH)
+	EX	fst.d	$f12, \base, (12 * FPU_REG_WIDTH)
+	EX	fst.d	$f13, \base, (13 * FPU_REG_WIDTH)
+	EX	fst.d	$f14, \base, (14 * FPU_REG_WIDTH)
+	EX	fst.d	$f15, \base, (15 * FPU_REG_WIDTH)
+	EX	fst.d	$f16, \base, (16 * FPU_REG_WIDTH)
+	EX	fst.d	$f17, \base, (17 * FPU_REG_WIDTH)
+	EX	fst.d	$f18, \base, (18 * FPU_REG_WIDTH)
+	EX	fst.d	$f19, \base, (19 * FPU_REG_WIDTH)
+	EX	fst.d	$f20, \base, (20 * FPU_REG_WIDTH)
+	EX	fst.d	$f21, \base, (21 * FPU_REG_WIDTH)
+	EX	fst.d	$f22, \base, (22 * FPU_REG_WIDTH)
+	EX	fst.d	$f23, \base, (23 * FPU_REG_WIDTH)
+	EX	fst.d	$f24, \base, (24 * FPU_REG_WIDTH)
+	EX	fst.d	$f25, \base, (25 * FPU_REG_WIDTH)
+	EX	fst.d	$f26, \base, (26 * FPU_REG_WIDTH)
+	EX	fst.d	$f27, \base, (27 * FPU_REG_WIDTH)
+	EX	fst.d	$f28, \base, (28 * FPU_REG_WIDTH)
+	EX	fst.d	$f29, \base, (29 * FPU_REG_WIDTH)
+	EX	fst.d	$f30, \base, (30 * FPU_REG_WIDTH)
+	EX	fst.d	$f31, \base, (31 * FPU_REG_WIDTH)
 	.endm

 	.macro sc_restore_fp base
-	EX	fld.d $f0, \base, (0 * FPU_REG_WIDTH)
-	EX	fld.d $f1, \base, (1 * FPU_REG_WIDTH)
-	EX	fld.d $f2, \base, (2 * FPU_REG_WIDTH)
-	EX	fld.d $f3, \base, (3 * FPU_REG_WIDTH)
-	EX	fld.d $f4, \base, (4 * FPU_REG_WIDTH)
-	EX	fld.d $f5, \base, (5 * FPU_REG_WIDTH)
-	EX	fld.d $f6, \base, (6 * FPU_REG_WIDTH)
-	EX	fld.d $f7, \base, (7 * FPU_REG_WIDTH)
-	EX	fld.d $f8, \base, (8 * FPU_REG_WIDTH)
-	EX	fld.d $f9, \base, (9 * FPU_REG_WIDTH)
-	EX	fld.d $f10, \base, (10 * FPU_REG_WIDTH)
-	EX	fld.d $f11, \base, (11 * FPU_REG_WIDTH)
-	EX	fld.d $f12, \base, (12 * FPU_REG_WIDTH)
-	EX	fld.d $f13, \base, (13 * FPU_REG_WIDTH)
-	EX	fld.d $f14, \base, (14 * FPU_REG_WIDTH)
-	EX	fld.d $f15, \base, (15 * FPU_REG_WIDTH)
-	EX	fld.d $f16, \base, (16 * FPU_REG_WIDTH)
-	EX	fld.d $f17, \base, (17 * FPU_REG_WIDTH)
-	EX	fld.d $f18, \base, (18 * FPU_REG_WIDTH)
-	EX	fld.d $f19, \base, (19 * FPU_REG_WIDTH)
-	EX	fld.d $f20, \base, (20 * FPU_REG_WIDTH)
-	EX	fld.d $f21, \base, (21 * FPU_REG_WIDTH)
-	EX	fld.d $f22, \base, (22 * FPU_REG_WIDTH)
-	EX	fld.d $f23, \base, (23 * FPU_REG_WIDTH)
-	EX	fld.d $f24, \base, (24 * FPU_REG_WIDTH)
-	EX	fld.d $f25, \base, (25 * FPU_REG_WIDTH)
-	EX	fld.d $f26, \base, (26 * FPU_REG_WIDTH)
-	EX	fld.d $f27, \base, (27 * FPU_REG_WIDTH)
-	EX	fld.d $f28, \base, (28 * FPU_REG_WIDTH)
-	EX	fld.d $f29, \base, (29 * FPU_REG_WIDTH)
-	EX	fld.d $f30, \base, (30 * FPU_REG_WIDTH)
-	EX	fld.d $f31, \base, (31 * FPU_REG_WIDTH)
+	EX	fld.d	$f0, \base, (0 * FPU_REG_WIDTH)
+	EX	fld.d	$f1, \base, (1 * FPU_REG_WIDTH)
+	EX	fld.d	$f2, \base, (2 * FPU_REG_WIDTH)
+	EX	fld.d	$f3, \base, (3 * FPU_REG_WIDTH)
+	EX	fld.d	$f4, \base, (4 * FPU_REG_WIDTH)
+	EX	fld.d	$f5, \base, (5 * FPU_REG_WIDTH)
+	EX	fld.d	$f6, \base, (6 * FPU_REG_WIDTH)
+	EX	fld.d	$f7, \base, (7 * FPU_REG_WIDTH)
+	EX	fld.d	$f8, \base, (8 * FPU_REG_WIDTH)
+	EX	fld.d	$f9, \base, (9 * FPU_REG_WIDTH)
+	EX	fld.d	$f10, \base, (10 * FPU_REG_WIDTH)
+	EX	fld.d	$f11, \base, (11 * FPU_REG_WIDTH)
+	EX	fld.d	$f12, \base, (12 * FPU_REG_WIDTH)
+	EX	fld.d	$f13, \base, (13 * FPU_REG_WIDTH)
+	EX	fld.d	$f14, \base, (14 * FPU_REG_WIDTH)
+	EX	fld.d	$f15, \base, (15 * FPU_REG_WIDTH)
+	EX	fld.d	$f16, \base, (16 * FPU_REG_WIDTH)
+	EX	fld.d	$f17, \base, (17 * FPU_REG_WIDTH)
+	EX	fld.d	$f18, \base, (18 * FPU_REG_WIDTH)
+	EX	fld.d	$f19, \base, (19 * FPU_REG_WIDTH)
+	EX	fld.d	$f20, \base, (20 * FPU_REG_WIDTH)
+	EX	fld.d	$f21, \base, (21 * FPU_REG_WIDTH)
+	EX	fld.d	$f22, \base, (22 * FPU_REG_WIDTH)
+	EX	fld.d	$f23, \base, (23 * FPU_REG_WIDTH)
+	EX	fld.d	$f24, \base, (24 * FPU_REG_WIDTH)
+	EX	fld.d	$f25, \base, (25 * FPU_REG_WIDTH)
+	EX	fld.d	$f26, \base, (26 * FPU_REG_WIDTH)
+	EX	fld.d	$f27, \base, (27 * FPU_REG_WIDTH)
+	EX	fld.d	$f28, \base, (28 * FPU_REG_WIDTH)
+	EX	fld.d	$f29, \base, (29 * FPU_REG_WIDTH)
+	EX	fld.d	$f30, \base, (30 * FPU_REG_WIDTH)
+	EX	fld.d	$f31, \base, (31 * FPU_REG_WIDTH)
 	.endm

 	.macro sc_save_fcc base, tmp0, tmp1
 	movcf2gr	\tmp0, $fcc0
-	move \tmp1, \tmp0
+	move	\tmp1, \tmp0
 	movcf2gr	\tmp0, $fcc1
 	bstrins.d	\tmp1, \tmp0, 15, 8
 	movcf2gr	\tmp0, $fcc2
@@ -113,11 +113,11 @@
 	bstrins.d	\tmp1, \tmp0, 55, 48
 	movcf2gr	\tmp0, $fcc7
 	bstrins.d	\tmp1, \tmp0, 63, 56
-	EX	st.d \tmp1, \base, 0
+	EX	st.d	\tmp1, \base, 0
 	.endm

 	.macro sc_restore_fcc base, tmp0, tmp1
-	EX	ld.d \tmp0, \base, 0
+	EX	ld.d	\tmp0, \base, 0
 	bstrpick.d	\tmp1, \tmp0, 7, 0
 	movgr2cf	$fcc0, \tmp1
 	bstrpick.d	\tmp1, \tmp0, 15, 8
@@ -138,11 +138,11 @@

 	.macro sc_save_fcsr base, tmp0
 	movfcsr2gr	\tmp0, fcsr0
-	EX	st.w \tmp0, \base, 0
+	EX	st.w	\tmp0, \base, 0
 	.endm

 	.macro sc_restore_fcsr base, tmp0
-	EX	ld.w \tmp0, \base, 0
+	EX	ld.w	\tmp0, \base, 0
 	movgr2fcsr	fcsr0, \tmp0
 	.endm

@@ -151,9 +151,9 @@
  */
SYM_FUNC_START(_save_fp)
 	fpu_save_csr	a0 t1
-	fpu_save_double a0 t1		# clobbers t1
+	fpu_save_double	a0 t1		# clobbers t1
 	fpu_save_cc	a0 t1 t2	# clobbers t1, t2
-	jirl zero, ra, 0
+	jr	ra
SYM_FUNC_END(_save_fp)
EXPORT_SYMBOL(_save_fp)

@@ -161,10 +161,10 @@ EXPORT_SYMBOL(_save_fp)
  * Restore a thread's fp context.
  */
SYM_FUNC_START(_restore_fp)
-	fpu_restore_double a0 t1	# clobbers t1
-	fpu_restore_csr a0 t1
-	fpu_restore_cc a0 t1 t2		# clobbers t1, t2
-	jirl zero, ra, 0
+	fpu_restore_double	a0 t1		# clobbers t1
+	fpu_restore_csr		a0 t1
+	fpu_restore_cc		a0 t1 t2	# clobbers t1, t2
+	jr	ra
SYM_FUNC_END(_restore_fp)

 /*
@@ -216,7 +216,7 @@ SYM_FUNC_START(_init_fpu)
 	movgr2fr.d	$f30, t1
 	movgr2fr.d	$f31, t1

-	jirl zero, ra, 0
+	jr	ra
SYM_FUNC_END(_init_fpu)

 /*
@@ -225,11 +225,11 @@
  * a2: fcsr
  */
SYM_FUNC_START(_save_fp_context)
-	sc_save_fcc a1 t1 t2
-	sc_save_fcsr a2 t1
-	sc_save_fp a0
-	li.w	a0, 0				# success
-	jirl zero, ra, 0
+	sc_save_fcc	a1 t1 t2
+	sc_save_fcsr	a2 t1
+	sc_save_fp	a0
+	li.w		a0, 0			# success
+	jr		ra
SYM_FUNC_END(_save_fp_context)

 /*
@@ -238,14 +238,14 @@
  * a2: fcsr
  */
SYM_FUNC_START(_restore_fp_context)
-	sc_restore_fp a0
-	sc_restore_fcc a1 t1 t2
-	sc_restore_fcsr a2 t1
-	li.w	a0, 0				# success
-	jirl zero, ra, 0
+	sc_restore_fp	a0
+	sc_restore_fcc	a1 t1 t2
+	sc_restore_fcsr	a2 t1
+	li.w		a0, 0			# success
+	jr		ra
SYM_FUNC_END(_restore_fp_context)

SYM_FUNC_START(fault)
 	li.w	a0, -EFAULT			# failure
-	jirl zero, ra, 0
+	jr	ra
SYM_FUNC_END(fault)
@@ -28,23 +28,23 @@ SYM_FUNC_START(__arch_cpu_idle)
 	nop
 	idle	0
 	/* end of rollback region */
-1:	jirl	zero, ra, 0
+1:	jr	ra
SYM_FUNC_END(__arch_cpu_idle)

SYM_FUNC_START(handle_vint)
 	BACKUP_T0T1
 	SAVE_ALL
 	la.abs	t1, __arch_cpu_idle
-	LONG_L  t0, sp, PT_ERA
+	LONG_L	t0, sp, PT_ERA
 	/* 32 byte rollback region */
 	ori	t0, t0, 0x1f
 	xori	t0, t0, 0x1f
 	bne	t0, t1, 1f
-	LONG_S  t0, sp, PT_ERA
+	LONG_S	t0, sp, PT_ERA
1:	move	a0, sp
 	move	a1, sp
 	la.abs	t0, do_vint
-	jirl    ra, t0, 0
+	jirl	ra, t0, 0
 	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_vint)

@@ -72,7 +72,7 @@ SYM_FUNC_END(except_vec_cex)
 	build_prep_\prep
 	move	a0, sp
 	la.abs	t0, do_\handler
-	jirl    ra, t0, 0
+	jirl	ra, t0, 0
 	RESTORE_ALL_AND_RET
 	SYM_FUNC_END(handle_\exception)
 	.endm
@@ -91,5 +91,5 @@ SYM_FUNC_END(except_vec_cex)

SYM_FUNC_START(handle_sys)
 	la.abs	t0, handle_syscall
-	jirl    zero, t0, 0
+	jr	t0
SYM_FUNC_END(handle_sys)
@@ -32,7 +32,7 @@ SYM_CODE_START(kernel_entry)			# kernel entry point
 	/* We might not get launched at the address the kernel is linked to,
 	   so we jump there. */
 	la.abs	t0, 0f
-	jirl	zero, t0, 0
+	jr	t0
0:
 	la	t0, __bss_start		# clear .bss
 	st.d	zero, t0, 0
@@ -50,7 +50,7 @@ SYM_CODE_START(kernel_entry)			# kernel entry point
 	/* KSave3 used for percpu base, initialized as 0 */
 	csrwr	zero, PERCPU_BASE_KS
 	/* GPR21 used for percpu base (runtime), initialized as 0 */
-	or	u0, zero, zero
+	move	u0, zero

 	la	tp, init_thread_union
 	/* Set the SP after an empty pt_regs. */
@@ -85,8 +85,8 @@ SYM_CODE_START(smpboot_entry)
 	ld.d	sp, t0, CPU_BOOT_STACK
 	ld.d	tp, t0, CPU_BOOT_TINFO

-	la.abs  t0, 0f
-	jirl    zero, t0, 0
+	la.abs	t0, 0f
+	jr	t0
0:
 	bl	start_secondary
SYM_CODE_END(smpboot_entry)
@@ -193,7 +193,7 @@ static int fpr_set(struct task_struct *target,
 		   const void *kbuf, const void __user *ubuf)
 {
 	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
-	const int fcc_end = fcc_start + sizeof(u64);
+	const int fcsr_start = fcc_start + sizeof(u64);
 	int err;

 	BUG_ON(count % sizeof(elf_fpreg_t));
@@ -209,10 +209,12 @@ static int fpr_set(struct task_struct *target,
 	if (err)
 		return err;

-	if (count > 0)
-		err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu.fcc,
-					  fcc_start, fcc_end);
+	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				  &target->thread.fpu.fcc, fcc_start,
+				  fcc_start + sizeof(u64));
+	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				  &target->thread.fpu.fcsr, fcsr_start,
+				  fcsr_start + sizeof(u32));

 	return err;
 }
@@ -13,7 +13,6 @@
 #include <linux/console.h>

 #include <acpi/reboot.h>
-#include <asm/compiler.h>
 #include <asm/idle.h>
 #include <asm/loongarch.h>
 #include <asm/reboot.h>
@@ -126,7 +126,7 @@ static void __init parse_bios_table(const struct dmi_header *dm)
 	char *dmi_data = (char *)dm;

 	bios_extern = *(dmi_data + SMBIOS_BIOSEXTERN_OFFSET);
-	b_info.bios_size = *(dmi_data + SMBIOS_BIOSSIZE_OFFSET);
+	b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6;

 	if (bios_extern & LOONGSON_EFI_ENABLE)
 		set_bit(EFI_BOOT, &efi.flags);
@@ -278,116 +278,29 @@ void loongson3_cpu_die(unsigned int cpu)
 	mb();
 }

-/*
- * The target CPU should go to XKPRANGE (uncached area) and flush
- * ICache/DCache/VCache before the control CPU can safely disable its clock.
- */
-static void loongson3_play_dead(int *state_addr)
+void play_dead(void)
 {
 	register int val;
-	register void *addr;
+	register uint64_t addr;
 	register void (*init_fn)(void);

-	__asm__ __volatile__(
-		"   li.d %[addr], 0x8000000000000000\n"
-		"1: cacop 0x8, %[addr], 0           \n" /* flush ICache */
-		"   cacop 0x8, %[addr], 1           \n"
-		"   cacop 0x8, %[addr], 2           \n"
-		"   cacop 0x8, %[addr], 3           \n"
-		"   cacop 0x9, %[addr], 0           \n" /* flush DCache */
-		"   cacop 0x9, %[addr], 1           \n"
-		"   cacop 0x9, %[addr], 2           \n"
-		"   cacop 0x9, %[addr], 3           \n"
-		"   addi.w %[sets], %[sets], -1     \n"
-		"   addi.d %[addr], %[addr], 0x40   \n"
-		"   bnez %[sets], 1b                \n"
-		"   li.d %[addr], 0x8000000000000000\n"
-		"2: cacop 0xa, %[addr], 0           \n" /* flush VCache */
-		"   cacop 0xa, %[addr], 1           \n"
-		"   cacop 0xa, %[addr], 2           \n"
-		"   cacop 0xa, %[addr], 3           \n"
-		"   cacop 0xa, %[addr], 4           \n"
-		"   cacop 0xa, %[addr], 5           \n"
-		"   cacop 0xa, %[addr], 6           \n"
-		"   cacop 0xa, %[addr], 7           \n"
-		"   cacop 0xa, %[addr], 8           \n"
-		"   cacop 0xa, %[addr], 9           \n"
-		"   cacop 0xa, %[addr], 10          \n"
-		"   cacop 0xa, %[addr], 11          \n"
-		"   cacop 0xa, %[addr], 12          \n"
-		"   cacop 0xa, %[addr], 13          \n"
-		"   cacop 0xa, %[addr], 14          \n"
-		"   cacop 0xa, %[addr], 15          \n"
-		"   addi.w %[vsets], %[vsets], -1   \n"
-		"   addi.d %[addr], %[addr], 0x40   \n"
-		"   bnez %[vsets], 2b               \n"
-		"   li.w %[val], 0x7                \n" /* *state_addr = CPU_DEAD; */
-		"   st.w %[val], %[state_addr], 0   \n"
-		"   dbar 0                          \n"
-		"   cacop 0x11, %[state_addr], 0    \n" /* flush entry of *state_addr */
-		: [addr] "=&r" (addr), [val] "=&r" (val)
-		: [state_addr] "r" (state_addr),
-		  [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
-		  [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
-
+	idle_task_exit();
 	local_irq_enable();
-	change_csr_ecfg(ECFG0_IM, ECFGF_IPI);
+	set_csr_ecfg(ECFGF_IPI);
+	__this_cpu_write(cpu_state, CPU_DEAD);

-	__asm__ __volatile__(
-		"   idle      0                     \n"
-		"   li.w      $t0, 0x1020           \n"
-		"   iocsrrd.d %[init_fn], $t0       \n" /* Get init PC */
-		: [init_fn] "=&r" (addr)
-		: /* No Input */
-		: "a0");
-	init_fn = __va(addr);
+	__smp_mb();
+	do {
+		__asm__ __volatile__("idle 0\n\t");
+		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+	} while (addr == 0);
+
+	init_fn = (void *)TO_CACHE(addr);
+	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

 	init_fn();
 	unreachable();
 }

-void play_dead(void)
-{
-	int *state_addr;
-	unsigned int cpu = smp_processor_id();
-	void (*play_dead_uncached)(int *s);
-
-	idle_task_exit();
-	play_dead_uncached = (void *)TO_UNCACHE(__pa((unsigned long)loongson3_play_dead));
-	state_addr = &per_cpu(cpu_state, cpu);
-	mb();
-	play_dead_uncached(state_addr);
-}
-
-static int loongson3_enable_clock(unsigned int cpu)
-{
-	uint64_t core_id = cpu_data[cpu].core;
-	uint64_t package_id = cpu_data[cpu].package;
-
-	LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
-
-	return 0;
-}
-
-static int loongson3_disable_clock(unsigned int cpu)
-{
-	uint64_t core_id = cpu_data[cpu].core;
-	uint64_t package_id = cpu_data[cpu].package;
-
-	LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
-
-	return 0;
-}
-
-static int register_loongson3_notifier(void)
-{
-	return cpuhp_setup_state_nocalls(CPUHP_LOONGARCH_SOC_PREPARE,
-					 "loongarch/loongson:prepare",
-					 loongson3_enable_clock,
-					 loongson3_disable_clock);
-}
-early_initcall(register_loongson3_notifier);
-
 #endif

 /*
@@ -24,8 +24,8 @@ SYM_FUNC_START(__switch_to)
 	move	tp, a2
 	cpu_restore_nonscratch a1

-	li.w	t0, _THREAD_SIZE - 32
-	PTR_ADD	t0, t0, tp
+	li.w		t0, _THREAD_SIZE - 32
+	PTR_ADD		t0, t0, tp
 	set_saved_sp	t0, t1, t2

 	ldptr.d	t1, a1, THREAD_CSRPRMD
@@ -32,7 +32,7 @@ SYM_FUNC_START(__clear_user)
1:	st.b	zero, a0, 0
 	addi.d	a0, a0, 1
 	addi.d	a1, a1, -1
-	bgt	a1, zero, 1b
+	bgtz	a1, 1b

2:	move	a0, a1
 	jr	ra
@@ -35,7 +35,7 @@ SYM_FUNC_START(__copy_user)
 	addi.d	a0, a0, 1
 	addi.d	a1, a1, 1
 	addi.d	a2, a2, -1
-	bgt	a2, zero, 1b
+	bgtz	a2, 1b

3:	move	a0, a2
 	jr	ra
@@ -7,7 +7,6 @@
 #include <linux/smp.h>
 #include <linux/timex.h>

-#include <asm/compiler.h>
 #include <asm/processor.h>

 void __delay(unsigned long cycles)
@@ -10,75 +10,75 @@

 	.align 5
SYM_FUNC_START(clear_page)
-	lu12i.w  t0, 1 << (PAGE_SHIFT - 12)
-	add.d    t0, t0, a0
+	lu12i.w	t0, 1 << (PAGE_SHIFT - 12)
+	add.d	t0, t0, a0
1:
-	st.d     zero, a0, 0
-	st.d     zero, a0, 8
-	st.d     zero, a0, 16
-	st.d     zero, a0, 24
-	st.d     zero, a0, 32
-	st.d     zero, a0, 40
-	st.d     zero, a0, 48
-	st.d     zero, a0, 56
-	addi.d   a0, a0, 128
-	st.d     zero, a0, -64
-	st.d     zero, a0, -56
-	st.d     zero, a0, -48
-	st.d     zero, a0, -40
-	st.d     zero, a0, -32
-	st.d     zero, a0, -24
-	st.d     zero, a0, -16
-	st.d     zero, a0, -8
-	bne      t0, a0, 1b
+	st.d	zero, a0, 0
+	st.d	zero, a0, 8
+	st.d	zero, a0, 16
+	st.d	zero, a0, 24
+	st.d	zero, a0, 32
+	st.d	zero, a0, 40
+	st.d	zero, a0, 48
+	st.d	zero, a0, 56
+	addi.d	a0, a0, 128
+	st.d	zero, a0, -64
+	st.d	zero, a0, -56
+	st.d	zero, a0, -48
+	st.d	zero, a0, -40
+	st.d	zero, a0, -32
+	st.d	zero, a0, -24
+	st.d	zero, a0, -16
+	st.d	zero, a0, -8
+	bne	t0, a0, 1b

-	jirl     $r0, ra, 0
+	jr	ra
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)

 	.align 5
SYM_FUNC_START(copy_page)
-	lu12i.w  t8, 1 << (PAGE_SHIFT - 12)
-	add.d    t8, t8, a0
+	lu12i.w	t8, 1 << (PAGE_SHIFT - 12)
+	add.d	t8, t8, a0
1:
-	ld.d     t0, a1, 0
-	ld.d     t1, a1, 8
-	ld.d     t2, a1, 16
-	ld.d     t3, a1, 24
-	ld.d     t4, a1, 32
-	ld.d     t5, a1, 40
-	ld.d     t6, a1, 48
-	ld.d     t7, a1, 56
+	ld.d	t0, a1, 0
+	ld.d	t1, a1, 8
+	ld.d	t2, a1, 16
+	ld.d	t3, a1, 24
+	ld.d	t4, a1, 32
+	ld.d	t5, a1, 40
+	ld.d	t6, a1, 48
+	ld.d	t7, a1, 56

-	st.d     t0, a0, 0
-	st.d     t1, a0, 8
-	ld.d     t0, a1, 64
-	ld.d     t1, a1, 72
-	st.d     t2, a0, 16
-	st.d     t3, a0, 24
-	ld.d     t2, a1, 80
-	ld.d     t3, a1, 88
-	st.d     t4, a0, 32
-	st.d     t5, a0, 40
-	ld.d     t4, a1, 96
-	ld.d     t5, a1, 104
-	st.d     t6, a0, 48
-	st.d     t7, a0, 56
-	ld.d     t6, a1, 112
-	ld.d     t7, a1, 120
-	addi.d   a0, a0, 128
-	addi.d   a1, a1, 128
+	st.d	t0, a0, 0
+	st.d	t1, a0, 8
+	ld.d	t0, a1, 64
+	ld.d	t1, a1, 72
+	st.d	t2, a0, 16
+	st.d	t3, a0, 24
+	ld.d	t2, a1, 80
+	ld.d	t3, a1, 88
+	st.d	t4, a0, 32
+	st.d	t5, a0, 40
+	ld.d	t4, a1, 96
+	ld.d	t5, a1, 104
+	st.d	t6, a0, 48
+	st.d	t7, a0, 56
+	ld.d	t6, a1, 112
+	ld.d	t7, a1, 120
+	addi.d	a0, a0, 128
+	addi.d	a1, a1, 128

-	st.d     t0, a0, -64
-	st.d     t1, a0, -56
-	st.d     t2, a0, -48
-	st.d     t3, a0, -40
-	st.d     t4, a0, -32
-	st.d     t5, a0, -24
-	st.d     t6, a0, -16
-	st.d     t7, a0, -8
+	st.d	t0, a0, -64
+	st.d	t1, a0, -56
+	st.d	t2, a0, -48
+	st.d	t3, a0, -40
+	st.d	t4, a0, -32
+	st.d	t5, a0, -24
+	st.d	t6, a0, -16
+	st.d	t7, a0, -8

-	bne      t8, a0, 1b
-	jirl     $r0, ra, 0
+	bne	t8, a0, 1b
+	jr	ra
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
@@ -18,7 +18,7 @@
 	REG_S	a2, sp, PT_BVADDR
 	li.w	a1, \write
 	la.abs	t0, do_page_fault
-	jirl    ra, t0, 0
+	jirl	ra, t0, 0
 	RESTORE_ALL_AND_RET
 	SYM_FUNC_END(tlb_do_page_fault_\write)
 	.endm
@@ -34,7 +34,7 @@ SYM_FUNC_START(handle_tlb_protect)
 	csrrd	a2, LOONGARCH_CSR_BADV
 	REG_S	a2, sp, PT_BVADDR
 	la.abs	t0, do_page_fault
-	jirl    ra, t0, 0
+	jirl	ra, t0, 0
 	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect)

@@ -47,7 +47,7 @@ SYM_FUNC_START(handle_tlb_load)
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	csrrd	t0, LOONGARCH_CSR_BADV
-	blt	t0, $r0, vmalloc_load
+	bltz	t0, vmalloc_load
 	csrrd	t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
@@ -80,7 +80,7 @@ vmalloc_done_load:
 	 * see if we need to jump to huge tlb processing.
 	 */
 	andi	t0, ra, _PAGE_HUGE
-	bne	t0, $r0, tlb_huge_update_load
+	bnez	t0, tlb_huge_update_load

 	csrrd	t0, LOONGARCH_CSR_BADV
 	srli.d	t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -100,12 +100,12 @@ smp_pgtable_change_load:

 	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
 	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_load
+	beqz	ra, nopage_tlb_load

 	ori	t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, smp_pgtable_change_load
+	beqz	t0, smp_pgtable_change_load
#else
 	st.d	t0, t1, 0
#endif
@@ -139,23 +139,23 @@ tlb_huge_update_load:
#endif
 	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
 	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_load
+	beqz	ra, nopage_tlb_load
 	tlbsrch

 	ori	t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, tlb_huge_update_load
+	beqz	t0, tlb_huge_update_load
 	ld.d	t0, t1, 0
#else
 	st.d	t0, t1, 0
#endif
-	addu16i.d	t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
-	addi.d		ra, t1, 0
-	csrxchg		ra, t1, LOONGARCH_CSR_TLBIDX
+	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
+	addi.d	ra, t1, 0
+	csrxchg	ra, t1, LOONGARCH_CSR_TLBIDX
 	tlbwr

-	csrxchg	$r0, t1, LOONGARCH_CSR_TLBIDX
+	csrxchg	zero, t1, LOONGARCH_CSR_TLBIDX

 	/*
 	 * A huge PTE describes an area the size of the
@@ -178,27 +178,27 @@ tlb_huge_update_load:
 	addi.d	t0, ra, 0

 	/* Convert to entrylo1 */
-	addi.d	t1, $r0, 1
+	addi.d	t1, zero, 1
 	slli.d	t1, t1, (HPAGE_SHIFT - 1)
 	add.d	t0, t0, t1
 	csrwr	t0, LOONGARCH_CSR_TLBELO1

 	/* Set huge page tlb entry size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

 	tlbfill

-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

nopage_tlb_load:
 	dbar	0
 	csrrd	ra, EXCEPTION_KS2
 	la.abs	t0, tlb_do_page_fault_0
-	jirl	$r0, t0, 0
+	jr	t0
SYM_FUNC_END(handle_tlb_load)

SYM_FUNC_START(handle_tlb_store)
@@ -210,7 +210,7 @@ SYM_FUNC_START(handle_tlb_store)
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	csrrd	t0, LOONGARCH_CSR_BADV
-	blt	t0, $r0, vmalloc_store
+	bltz	t0, vmalloc_store
 	csrrd	t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
@@ -244,7 +244,7 @@ vmalloc_done_store:
 	 * see if we need to jump to huge tlb processing.
 	 */
 	andi	t0, ra, _PAGE_HUGE
-	bne	t0, $r0, tlb_huge_update_store
+	bnez	t0, tlb_huge_update_store

 	csrrd	t0, LOONGARCH_CSR_BADV
 	srli.d	t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -265,12 +265,12 @@ smp_pgtable_change_store:
 	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
 	andi	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
 	xori	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
-	bne	ra, $r0, nopage_tlb_store
+	bnez	ra, nopage_tlb_store

 	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, smp_pgtable_change_store
+	beqz	t0, smp_pgtable_change_store
#else
 	st.d	t0, t1, 0
#endif
@@ -306,24 +306,24 @@ tlb_huge_update_store:
 	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
 	andi	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
 	xori	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
-	bne	ra, $r0, nopage_tlb_store
+	bnez	ra, nopage_tlb_store

 	tlbsrch
 	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)

#ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, tlb_huge_update_store
+	beqz	t0, tlb_huge_update_store
 	ld.d	t0, t1, 0
#else
 	st.d	t0, t1, 0
#endif
-	addu16i.d	t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
-	addi.d		ra, t1, 0
-	csrxchg		ra, t1, LOONGARCH_CSR_TLBIDX
+	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
+	addi.d	ra, t1, 0
+	csrxchg	ra, t1, LOONGARCH_CSR_TLBIDX
 	tlbwr

-	csrxchg	$r0, t1, LOONGARCH_CSR_TLBIDX
+	csrxchg	zero, t1, LOONGARCH_CSR_TLBIDX
 	/*
 	 * A huge PTE describes an area the size of the
 	 * configured huge page size. This is twice the
@@ -345,28 +345,28 @@ tlb_huge_update_store:
 	addi.d	t0, ra, 0

 	/* Convert to entrylo1 */
-	addi.d	t1, $r0, 1
+	addi.d	t1, zero, 1
 	slli.d	t1, t1, (HPAGE_SHIFT - 1)
 	add.d	t0, t0, t1
 	csrwr	t0, LOONGARCH_CSR_TLBELO1

 	/* Set huge page tlb entry size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

 	tlbfill

 	/* Reset default page size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

nopage_tlb_store:
 	dbar	0
 	csrrd	ra, EXCEPTION_KS2
 	la.abs	t0, tlb_do_page_fault_1
-	jirl	$r0, t0, 0
+	jr	t0
SYM_FUNC_END(handle_tlb_store)

SYM_FUNC_START(handle_tlb_modify)
@@ -378,7 +378,7 @@ SYM_FUNC_START(handle_tlb_modify)
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	csrrd	t0, LOONGARCH_CSR_BADV
-	blt	t0, $r0, vmalloc_modify
+	bltz	t0, vmalloc_modify
 	csrrd	t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
@@ -411,7 +411,7 @@ vmalloc_done_modify:
 	 * see if we need to jump to huge tlb processing.
 	 */
 	andi	t0, ra, _PAGE_HUGE
-	bne	t0, $r0, tlb_huge_update_modify
+	bnez	t0, tlb_huge_update_modify

 	csrrd	t0, LOONGARCH_CSR_BADV
 	srli.d	t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -431,12 +431,12 @@ smp_pgtable_change_modify:

 	srli.d	ra, t0, _PAGE_WRITE_SHIFT
 	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_modify
+	beqz	ra, nopage_tlb_modify

 	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, smp_pgtable_change_modify
+	beqz	t0, smp_pgtable_change_modify
#else
 	st.d	t0, t1, 0
#endif
@@ -454,7 +454,7 @@ leave_modify:
 	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
-	la.abs  t1, swapper_pg_dir
+	la.abs	t1, swapper_pg_dir
 	b	vmalloc_done_modify
#endif
@@ -471,14 +471,14 @@ tlb_huge_update_modify:

 	srli.d	ra, t0, _PAGE_WRITE_SHIFT
 	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_modify
+	beqz	ra, nopage_tlb_modify

 	tlbsrch
 	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)

#ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, tlb_huge_update_modify
+	beqz	t0, tlb_huge_update_modify
 	ld.d	t0, t1, 0
#else
 	st.d	t0, t1, 0
@@ -504,28 +504,28 @@ tlb_huge_update_modify:
 	addi.d	t0, ra, 0

 	/* Convert to entrylo1 */
-	addi.d	t1, $r0, 1
+	addi.d	t1, zero, 1
 	slli.d	t1, t1, (HPAGE_SHIFT - 1)
 	add.d	t0, t0, t1
 	csrwr	t0, LOONGARCH_CSR_TLBELO1

 	/* Set huge page tlb entry size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
-	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

 	tlbwr

 	/* Reset default page size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
-	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

nopage_tlb_modify:
 	dbar	0
 	csrrd	ra, EXCEPTION_KS2
 	la.abs	t0, tlb_do_page_fault_1
-	jirl	$r0, t0, 0
+	jr	t0
SYM_FUNC_END(handle_tlb_modify)

SYM_FUNC_START(handle_tlb_refill)
@@ -130,7 +130,6 @@ enum cpuhp_state {
 	CPUHP_ZCOMP_PREPARE,
 	CPUHP_TIMERS_PREPARE,
 	CPUHP_MIPS_SOC_PREPARE,
-	CPUHP_LOONGARCH_SOC_PREPARE,
 	CPUHP_BP_PREPARE_DYN,
 	CPUHP_BP_PREPARE_DYN_END	= CPUHP_BP_PREPARE_DYN + 20,
 	CPUHP_BRINGUP_CPU,
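Illustrative note (not part of the pulled patches): the boardinfo fix in this series decodes the SMBIOS BIOS ROM size byte, where a raw value n means (n + 1) units of 64 KB, instead of storing the raw byte. A hedged C sketch with a hypothetical helper name:

    #include <assert.h>

    /* SMBIOS encodes the BIOS ROM size byte n as (n + 1) * 64 KB, so the
     * size in KB is (n + 1) << 6, which is what the patched line computes. */
    static unsigned int bios_size_kb(unsigned char raw)
    {
            return ((unsigned int)raw + 1) << 6;
    }

    int main(void)
    {
            assert(bios_size_kb(0) == 64);     /* smallest encodable ROM */
            assert(bios_size_kb(63) == 4096);  /* 4 MB flash part */
            return 0;
    }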