/*
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>

#define	RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define	RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
#define	RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)

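	/* In the three %pstate images above: TSO selects the total-store-order
	 * memory model, PEF keeps the FPU enabled, PRIV keeps us in privileged
	 * mode, IE enables interrupts, and AG selects the alternate global
	 * register set (pre-sun4v chips only).
	 */
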
	.text
	.align	32
__handle_softirq:
	call	do_softirq
	 nop
	ba,a,pt	%xcc, __handle_softirq_continue
	 nop
__handle_preemption:
	call	schedule
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	ba,pt	%xcc, __handle_preemption_continue
	 wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

__handle_user_windows:
	call	fault_in_user_windows
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	ba,pt	%xcc, __handle_preemption_continue
	 wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

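	/* If the FPU is not currently enabled for this task (FPRS_FEF clear),
	 * clear TSTATE_PEF in the saved %tstate (%l1) so the FPU stays
	 * disabled on return to userspace; otherwise leave PEF set.
	 */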
__handle_userfpu:
	rd	%fprs, %l5
	andcc	%l5, FPRS_FEF, %g0
	sethi	%hi(TSTATE_PEF), %o0
	be,a,pn	%icc, __handle_userfpu_continue
	 andn	%l1, %o0, %l1
	ba,a,pt	%xcc, __handle_userfpu_continue

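	/* do_notify_resume(regs, orig_i0, thread_info_flags); %l0 still holds
	 * the TI_FLAGS word tested above.  Signal delivery may rewrite the
	 * saved %tstate, so it is reloaded afterwards.
	 */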
__handle_signal:
	mov	%l5, %o1
	add	%sp, PTREGS_OFF, %o0
	mov	%l0, %o2
	call	do_notify_resume
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

	/* Signal delivery can modify pt_regs tstate, so we must
	 * reload it.
	 */
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	ba,pt	%xcc, __handle_preemption_continue
	 andn	%l1, %l4, %l1

	/* When returning from a NMI (%pil==15) interrupt we want to
	 * avoid running softirqs, doing IRQ tracing, preempting, etc.
	 */
	.globl	rtrap_nmi
rtrap_nmi:	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	andn	%l1, %l4, %l1
	srl	%l4, 20, %l4
	ba,pt	%xcc, rtrap_no_irq_enable
	 wrpr	%l4, %pil

	.align	64
	.globl	rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap:
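	/* Pick up this cpu's pending-softirq word: cpu_data is laid out with
	 * __softirq_pending as its first member precisely so this load works
	 * (on SMP, %g5 holds the per-cpu base offset).
	 */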
#ifndef CONFIG_SMP
	sethi	%hi(__cpu_data), %l0
	lduw	[%l0 + %lo(__cpu_data)], %l1
#else
	sethi	%hi(__cpu_data), %l0
	or	%l0, %lo(__cpu_data), %l0
	lduw	[%l0 + %g5], %l1
#endif
	cmp	%l1, 0

	/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
	bne,pn	%icc, __handle_softirq
	 ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
__handle_softirq_continue:
rtrap_xcall:
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	andn	%l1, %l4, %l1
	srl	%l4, 20, %l4
#ifdef CONFIG_TRACE_IRQFLAGS
	brnz,pn	%l4, rtrap_no_irq_enable
	 nop
	call	trace_hardirqs_on
	 nop
	/* Do not actually set the %pil here.  We will do that
	 * below after we clear PSTATE_IE in the %pstate register.
	 * If we re-enable interrupts here, we can recurse down
	 * the hardirq stack potentially endlessly, causing a
	 * stack overflow.
	 *
	 * It is tempting to put this test and trace_hardirqs_on
	 * call at the 'rt_continue' label, but that will not work
	 * as that path hits unconditionally and we do not want to
	 * execute this in NMI return paths, for example.
	 */
#endif
rtrap_no_irq_enable:
	andcc	%l1, TSTATE_PRIV, %l3
	bne,pn	%icc, to_kernel
	 nop

	/* We must hold IRQs off and atomically test schedule+signal
	 * state, then hold them off all the way back to userspace.
	 * If we are returning to kernel, none of this matters.  Note
	 * that we are disabling interrupts via PSTATE_IE, not using
	 * %pil.
	 *
	 * If we do not do this, there is a window where we would do
	 * the tests, later the signal/resched event arrives but we do
	 * not process it since we are still in kernel mode.  It would
	 * take until the next local IRQ before the signal/resched
	 * event would be handled.
	 *
	 * This also means that if we have to deal with user
	 * windows, we have to redo all of these sched+signal checks
	 * with IRQs disabled.
	 */
to_user:	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	wrpr	0, %pil
__handle_preemption_continue:
	ldx	[%g6 + TI_FLAGS], %l0
	sethi	%hi(_TIF_USER_WORK_MASK), %o0
	or	%o0, %lo(_TIF_USER_WORK_MASK), %o0
	andcc	%l0, %o0, %g0
	sethi	%hi(TSTATE_PEF), %o0
	be,pt	%xcc, user_nowork
	 andcc	%l1, %o0, %g0
	andcc	%l0, _TIF_NEED_RESCHED, %g0
	bne,pn	%xcc, __handle_preemption
	 andcc	%l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
	bne,pn	%xcc, __handle_signal
	 ldub	[%g6 + TI_WSAVED], %o2
	brnz,pn	%o2, __handle_user_windows
	 nop
	sethi	%hi(TSTATE_PEF), %o0
	andcc	%l1, %o0, %g0

	/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
	bne,pn	%xcc, __handle_userfpu
	 stb	%g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:

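	/* All user work (or the kernel-return checks) is done: reload the
	 * register images saved in pt_regs on the kernel stack and return
	 * from the trap.
	 */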
rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
	ldx	[%sp + PTREGS_OFF + PT_V9_G2], %g2
	ldx	[%sp + PTREGS_OFF + PT_V9_G3], %g3
	ldx	[%sp + PTREGS_OFF + PT_V9_G4], %g4
	ldx	[%sp + PTREGS_OFF + PT_V9_G5], %g5
	brz,pt	%l3, 1f
	 mov	%g6, %l2

	/* Must do this before thread reg is clobbered below.  */
	LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
	ldx	[%sp + PTREGS_OFF + PT_V9_G6], %g6
	ldx	[%sp + PTREGS_OFF + PT_V9_G7], %g7

	/* Normal globals are restored, go to trap globals.  */
661:	wrpr	%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	SET_GL(1)
	.previous

	mov	%l2, %g6

	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1
	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
	ldx	[%sp + PTREGS_OFF + PT_V9_I6], %i6
	ldx	[%sp + PTREGS_OFF + PT_V9_I7], %i7
	ldx	[%sp + PTREGS_OFF + PT_V9_TPC], %l2
	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %o2

	ld	[%sp + PTREGS_OFF + PT_V9_Y], %o3
	wr	%o3, %g0, %y
	wrpr	%l4, 0x0, %pil
	wrpr	%g0, 0x1, %tl
	andn	%l1, TSTATE_SYSCALL, %l1
	wrpr	%l1, %g0, %tstate
	wrpr	%l2, %g0, %tpc
	wrpr	%o2, %g0, %tnpc

	brnz,pn	%l3, kern_rtt
	 mov	PRIMARY_CONTEXT, %l7

661:	ldxa	[%l7 + %l7] ASI_DMMU, %l0
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	ldxa	[%l7 + %l7] ASI_MMU, %l0
	.previous

	sethi	%hi(sparc64_kern_pri_nuc_bits), %l1
	ldx	[%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
	or	%l0, %l1, %l0

661:	stxa	%l0, [%l7] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%l0, [%l7] ASI_MMU
	.previous

	sethi	%hi(KERNBASE), %l7
	flush	%l7
	rdpr	%wstate, %l1
	rdpr	%otherwin, %l2
	srl	%l1, 3, %l1

	wrpr	%l2, %g0, %canrestore
	wrpr	%l1, %g0, %wstate
	brnz,pt	%l2, user_rtt_restore
	 wrpr	%g0, %g0, %otherwin

	ldx	[%g6 + TI_FLAGS], %g3
	wr	%g0, ASI_AIUP, %asi
	rdpr	%cwp, %g1
	andcc	%g3, _TIF_32BIT, %g0
	sub	%g1, 1, %g1
	bne,pt	%xcc, user_rtt_fill_32bit
	 wrpr	%g1, %cwp
	ba,a,pt	%xcc, user_rtt_fill_64bit

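	/* We faulted while filling the user's register window from the stack.
	 * Rewind %cwp, switch back to a kernel window state, record a
	 * FAULT_CODE_WINFIXUP fault and hand it to do_sparc64_fault().
	 */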
user_rtt_fill_fixup:
	rdpr	%cwp, %g1
	add	%g1, 1, %g1
	wrpr	%g1, 0x0, %cwp

	rdpr	%wstate, %g2
	sll	%g2, 3, %g2
	wrpr	%g2, 0x0, %wstate

	/* We know %canrestore and %otherwin are both zero.  */

	sethi	%hi(sparc64_kern_pri_context), %g2
	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
	mov	PRIMARY_CONTEXT, %g1

661:	stxa	%g2, [%g1] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g2, [%g1] ASI_MMU
	.previous

	sethi	%hi(KERNBASE), %g1
	flush	%g1

	or	%g4, FAULT_CODE_WINFIXUP, %g4
	stb	%g4, [%g6 + TI_FAULT_CODE]
	stx	%g5, [%g6 + TI_FAULT_ADDR]

	mov	%g6, %l1
	wrpr	%g0, 0x0, %tl

661:	nop
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	SET_GL(0)
	.previous

	wrpr	%g0, RTRAP_PSTATE, %pstate

	mov	%l1, %g6
	ldx	[%g6 + TI_TASK], %g4
	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
	call	do_sparc64_fault
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop

user_rtt_pre_restore:
	add	%g1, 1, %g1
	wrpr	%g1, 0x0, %cwp

user_rtt_restore:
	restore
	rdpr	%canrestore, %g1
	wrpr	%g1, 0x0, %cleanwin
	retry
	nop

kern_rtt:	rdpr	%canrestore, %g1
	brz,pn	%g1, kern_rtt_fill
	 nop
kern_rtt_restore:
	stw	%g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
	restore
	retry

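	/* Return to kernel mode.  With CONFIG_PREEMPT we may reschedule here,
	 * but only if preempt_count is zero, NEED_RESCHED is set and the
	 * interrupted context had %pil 0 (%l4 carries the saved %pil).
	 */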
to_kernel:
#ifdef CONFIG_PREEMPT
	ldsw	[%g6 + TI_PRE_COUNT], %l5
	brnz	%l5, kern_fpucheck
	 ldx	[%g6 + TI_FLAGS], %l5
	andcc	%l5, _TIF_NEED_RESCHED, %g0
	be,pt	%xcc, kern_fpucheck
	 nop
	cmp	%l4, 0
	bne,pn	%xcc, kern_fpucheck
	 sethi	%hi(PREEMPT_ACTIVE), %l6
	stw	%l6, [%g6 + TI_PRE_COUNT]
	call	schedule
	 nop
	ba,pt	%xcc, rtrap
	 stw	%g0, [%g6 + TI_PRE_COUNT]
#endif
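	/* Lazy FPU restore for the trap level we are returning to: TI_FPDEPTH
	 * tracks nested in-kernel FPU usage and TI_FPSAVED/TI_FPREGS hold the
	 * partially saved register blocks from the way in.
	 */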
kern_fpucheck:	ldub	[%g6 + TI_FPDEPTH], %l5
	brz,pt	%l5, rt_continue
	 srl	%l5, 1, %o0
	add	%g6, TI_FPSAVED, %l6
	ldub	[%l6 + %o0], %l2
	sub	%l5, 2, %l5

	add	%g6, TI_GSR, %o1
	andcc	%l2, (FPRS_FEF|FPRS_DU), %g0
	be,pt	%icc, 2f
	 and	%l2, FPRS_DL, %l6
	andcc	%l2, FPRS_FEF, %g0
	be,pn	%icc, 5f
	 sll	%o0, 3, %o5
	rd	%fprs, %g1

	wr	%g1, FPRS_FEF, %fprs
	ldx	[%o1 + %o5], %g1
	add	%g6, TI_XFSR, %o1
	sll	%o0, 8, %o2
	add	%g6, TI_FPREGS, %o3
	brz,pn	%l6, 1f
	 add	%g6, TI_FPREGS+0x40, %o4

	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f0
	ldda	[%o4 + %o2] ASI_BLK_P, %f16
	membar	#Sync
1:	andcc	%l2, FPRS_DU, %g0
	be,pn	%icc, 1f
	 wr	%g1, 0, %gsr
	add	%o2, 0x80, %o2
	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f32
	ldda	[%o4 + %o2] ASI_BLK_P, %f48
1:	membar	#Sync
	ldx	[%o1 + %o5], %fsr
2:	stb	%l5, [%g6 + TI_FPDEPTH]
	ba,pt	%xcc, rt_continue
	 nop
5:	wr	%g0, FPRS_FEF, %fprs
	sll	%o0, 8, %o2

	add	%g6, TI_FPREGS+0x80, %o3
	add	%g6, TI_FPREGS+0xc0, %o4
	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f32
	ldda	[%o4 + %o2] ASI_BLK_P, %f48
	membar	#Sync
	wr	%g0, FPRS_DU, %fprs
	ba,pt	%xcc, rt_continue
	 stb	%l5, [%g6 + TI_FPDEPTH]