forked from Minki/linux
534be1d5a2
Linux expects that if a CPU modifies a memory location, then that modification will eventually become visible to other CPUs in the system. On an ARM11MPCore processor, loads are prioritised over stores so it is possible for a store operation to be postponed if a polling loop immediately follows it. If the variable being polled indirectly depends on the outstanding store [for example, another CPU may be polling the variable that is pending modification] then there is the potential for deadlock if interrupts are disabled. This deadlock occurs in the KGDB testsuite when executing on an SMP ARM11MPCore configuration. This patch changes the definition of cpu_relax() to smp_mb() for ARMv6 cores, forcing a flushing of the write buffer on SMP systems before the next load takes place. If the Kernel is not compiled for SMP support, this will expand to a barrier() as before. Acked-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
137 lines
3.0 KiB
C
137 lines
3.0 KiB
C
/*
 *  arch/arm/include/asm/processor.h
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

|
#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 *
 * Uses the GCC "labels as values" extension: a local label is
 * declared, placed, and its address taken with &&, yielding the
 * address of the instruction at the point of expansion.
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
|
#ifdef __KERNEL__

#include <asm/ptrace.h>
#include <asm/types.h>

#ifdef __KERNEL__
/*
 * Top of the user stack: tasks running with the ADDR_LIMIT_32BIT
 * personality get the full 32-bit TASK_SIZE; otherwise the legacy
 * 26-bit limit (TASK_SIZE_26) applies.
 */
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif
|
/*
 * A single stored instruction, viewed either as a 32-bit ARM
 * opcode or a 16-bit Thumb opcode.
 */
union debug_insn {
	u32	arm;	/* 32-bit ARM encoding */
	u16	thumb;	/* 16-bit Thumb encoding */
};
|
/*
 * One debug (breakpoint) record: the address of the instruction
 * together with the original instruction stored there.
 */
struct debug_entry {
	u32			address;	/* location of the instruction */
	union debug_insn	insn;		/* saved ARM/Thumb opcode */
};
|
/*
 * Per-thread debug state: up to two saved breakpoint entries.
 */
struct debug_info {
	int			nsaved;	/* number of valid entries in bp[] */
	struct debug_entry	bp[2];	/* saved breakpoint records */
};
|
/*
 * Architecture-specific per-thread state.
 */
struct thread_struct {
						/* fault info	  */
	unsigned long		address;	/* faulting address */
	unsigned long		trap_no;	/* trap number taken */
	unsigned long		error_code;	/* error code of the fault */
						/* debugging	  */
	struct debug_info	debug;		/* breakpoint state */
};

/* New threads start with all thread_struct fields zeroed. */
#define INIT_THREAD  {	}
|
/*
 * On !MMU kernels, new user threads need r10 pointed at the start of
 * the data segment; with an MMU this is unnecessary, so the macro is
 * a no-op.
 */
#ifdef CONFIG_MMU
#define nommu_start_thread(regs) do { } while (0)
#else
#define nommu_start_thread(regs) regs->ARM_r10 = current->mm->start_data
#endif
|
/*
 * Set up the register state (pt_regs) for a freshly exec'd user
 * thread: select the CPSR mode from the task personality, set the
 * Thumb bit if the entry point is a Thumb address, and load pc/sp
 * plus the initial argc/argv/envp words from the new user stack.
 */
#define start_thread(regs,pc,sp)					\
({									\
	unsigned long *stack = (unsigned long *)sp;			\
	set_fs(USER_DS);						\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	/* odd entry address => Thumb entry point (when HW supports it) */ \
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
	regs->ARM_r2 = stack[2];	/* r2 (envp) */			\
	regs->ARM_r1 = stack[1];	/* r1 (argv) */			\
	regs->ARM_r0 = stack[0];	/* r0 (argc) */			\
	nommu_start_thread(regs);					\
})
|
/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/* Return the "wait channel" (sleeping PC) of a blocked task. */
unsigned long get_wchan(struct task_struct *p);
|
/*
 * On ARMv6 (e.g. ARM11MPCore) loads are prioritised over stores, so a
 * pending store can be postponed indefinitely by a polling loop that
 * follows it, deadlocking another CPU polling the same location with
 * interrupts disabled.  Defining cpu_relax() as smp_mb() flushes the
 * write buffer before the next load; on non-SMP kernels smp_mb()
 * expands to barrier(), so behaviour there is unchanged.
 */
#if __LINUX_ARM_ARCH__ == 6
#define cpu_relax()			smp_mb()
#else
#define cpu_relax()			barrier()
#endif
|
/*
 * Create a new kernel thread
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/*
 * User registers are saved at the top of the kernel stack; locate the
 * pt_regs frame just below THREAD_START_SP of the task's stack page.
 */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

/* Saved user-mode pc/sp of a (stopped) task. */
#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
|
/*
 * Prefetching support - only ARMv5.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
/* Issue a PLD (preload data) hint for the cache line containing ptr. */
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:
		: "p" (ptr)
		: "cc");	/* NOTE(review): "cc" clobber looks conservative;
				   PLD should not affect the flags — confirm */
}

#define ARCH_HAS_PREFETCHW
/* No distinct write-prefetch instruction here; reuse the read prefetch. */
#define prefetchw(ptr)	prefetch(ptr)

#define ARCH_HAS_SPINLOCK_PREFETCH
#define spin_lock_prefetch(x) do { } while (0)

#endif
|
|
#endif
|
|
|
|
#endif /* __ASM_ARM_PROCESSOR_H */
|