sparc64: Use pause instruction when available.
In atomic backoff and cpu_relax(), use the pause instruction found on SPARC-T4 and later. It makes the CPU strand unselectable for the given number of cycles, unless an intervening disrupting trap occurs.

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
270c10e00a
commit
e9b9eb59ff
arch/sparc
@ -11,19 +11,25 @@
|
|||||||
#define BACKOFF_LABEL(spin_label, continue_label) \
|
#define BACKOFF_LABEL(spin_label, continue_label) \
|
||||||
spin_label
|
spin_label
|
||||||
|
|
||||||
#define BACKOFF_SPIN(reg, tmp, label) \
|
#define BACKOFF_SPIN(reg, tmp, label) \
|
||||||
mov reg, tmp; \
|
mov reg, tmp; \
|
||||||
88: rd %ccr, %g0; \
|
88: rd %ccr, %g0; \
|
||||||
rd %ccr, %g0; \
|
rd %ccr, %g0; \
|
||||||
rd %ccr, %g0; \
|
rd %ccr, %g0; \
|
||||||
brnz,pt tmp, 88b; \
|
.section .pause_patch,"ax"; \
|
||||||
sub tmp, 1, tmp; \
|
.word 88b; \
|
||||||
set BACKOFF_LIMIT, tmp; \
|
sllx tmp, 7, tmp; \
|
||||||
cmp reg, tmp; \
|
wr tmp, 0, %asr27; \
|
||||||
bg,pn %xcc, label; \
|
clr tmp; \
|
||||||
nop; \
|
.previous; \
|
||||||
ba,pt %xcc, label; \
|
brnz,pt tmp, 88b; \
|
||||||
sllx reg, 1, reg;
|
sub tmp, 1, tmp; \
|
||||||
|
set BACKOFF_LIMIT, tmp; \
|
||||||
|
cmp reg, tmp; \
|
||||||
|
bg,pn %xcc, label; \
|
||||||
|
nop; \
|
||||||
|
ba,pt %xcc, label; \
|
||||||
|
sllx reg, 1, reg;
|
||||||
|
|
||||||
#else
|
#else
|
||||||
|
|
||||||
|
@ -196,9 +196,16 @@ extern unsigned long get_wchan(struct task_struct *task);
|
|||||||
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
|
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
|
||||||
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
|
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
|
||||||
|
|
||||||
#define cpu_relax() asm volatile("rd %%ccr, %%g0\n\t" \
|
#define cpu_relax() asm volatile("\n99:\n\t" \
|
||||||
"rd %%ccr, %%g0\n\t" \
|
"rd %%ccr, %%g0\n\t" \
|
||||||
"rd %%ccr, %%g0" \
|
"rd %%ccr, %%g0\n\t" \
|
||||||
|
"rd %%ccr, %%g0\n\t" \
|
||||||
|
".section .pause_patch,\"ax\"\n\t"\
|
||||||
|
".word 99b\n\t" \
|
||||||
|
"wr %%g0, 128, %%asr27\n\t" \
|
||||||
|
"nop\n\t" \
|
||||||
|
"nop\n\t" \
|
||||||
|
".previous" \
|
||||||
::: "memory")
|
::: "memory")
|
||||||
|
|
||||||
/* Prefetch support. This is tuned for UltraSPARC-III and later.
|
/* Prefetch support. This is tuned for UltraSPARC-III and later.
|
||||||
|
@ -59,6 +59,13 @@ struct popc_6insn_patch_entry {
|
|||||||
extern struct popc_6insn_patch_entry __popc_6insn_patch,
|
extern struct popc_6insn_patch_entry __popc_6insn_patch,
|
||||||
__popc_6insn_patch_end;
|
__popc_6insn_patch_end;
|
||||||
|
|
||||||
|
struct pause_patch_entry {
|
||||||
|
unsigned int addr;
|
||||||
|
unsigned int insns[3];
|
||||||
|
};
|
||||||
|
extern struct pause_patch_entry __pause_patch,
|
||||||
|
__pause_patch_end;
|
||||||
|
|
||||||
extern void __init per_cpu_patch(void);
|
extern void __init per_cpu_patch(void);
|
||||||
extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
|
extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
|
||||||
struct sun4v_1insn_patch_entry *);
|
struct sun4v_1insn_patch_entry *);
|
||||||
|
@ -316,6 +316,25 @@ static void __init popc_patch(void)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void __init pause_patch(void)
|
||||||
|
{
|
||||||
|
struct pause_patch_entry *p;
|
||||||
|
|
||||||
|
p = &__pause_patch;
|
||||||
|
while (p < &__pause_patch_end) {
|
||||||
|
unsigned long i, addr = p->addr;
|
||||||
|
|
||||||
|
for (i = 0; i < 3; i++) {
|
||||||
|
*(unsigned int *) (addr + (i * 4)) = p->insns[i];
|
||||||
|
wmb();
|
||||||
|
__asm__ __volatile__("flush %0"
|
||||||
|
: : "r" (addr + (i * 4)));
|
||||||
|
}
|
||||||
|
|
||||||
|
p++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
void __init boot_cpu_id_too_large(int cpu)
|
void __init boot_cpu_id_too_large(int cpu)
|
||||||
{
|
{
|
||||||
@ -528,6 +547,8 @@ static void __init init_sparc64_elf_hwcap(void)
|
|||||||
|
|
||||||
if (sparc64_elf_hwcap & AV_SPARC_POPC)
|
if (sparc64_elf_hwcap & AV_SPARC_POPC)
|
||||||
popc_patch();
|
popc_patch();
|
||||||
|
if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
|
||||||
|
pause_patch();
|
||||||
}
|
}
|
||||||
|
|
||||||
void __init setup_arch(char **cmdline_p)
|
void __init setup_arch(char **cmdline_p)
|
||||||
|
@ -132,6 +132,11 @@ SECTIONS
|
|||||||
*(.popc_6insn_patch)
|
*(.popc_6insn_patch)
|
||||||
__popc_6insn_patch_end = .;
|
__popc_6insn_patch_end = .;
|
||||||
}
|
}
|
||||||
|
.pause_patch : {
|
||||||
|
__pause_patch = .;
|
||||||
|
*(.pause_patch)
|
||||||
|
__pause_patch_end = .;
|
||||||
|
}
|
||||||
PERCPU_SECTION(SMP_CACHE_BYTES)
|
PERCPU_SECTION(SMP_CACHE_BYTES)
|
||||||
|
|
||||||
. = ALIGN(PAGE_SIZE);
|
. = ALIGN(PAGE_SIZE);
|
||||||
|
Loading…
Reference in New Issue
Block a user