forked from Minki/linux
bd6be579a7
Functions in math-emu are annotated as ENTRY() symbols, but their ends are not annotated at all. But these are standard functions called from C, with proper stack register update etc. Omitting the ends means: * the annotations are not paired and we cannot deal with such functions e.g. in objtool * the symbols are not marked as functions in the object file * there are no sizes of the functions in the object file So fix this by adding ENDPROC() to each such case in math-emu. Signed-off-by: Jiri Slaby <jslaby@suse.cz> Cc: Andy Lutomirski <luto@kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/20170824080624.7768-1-jslaby@suse.cz Signed-off-by: Ingo Molnar <mingo@kernel.org>
179 lines
4.1 KiB
ArmAsm
179 lines
4.1 KiB
ArmAsm
/*---------------------------------------------------------------------------+
 |  mul_Xsig.S                                                               |
 |                                                                           |
 | Multiply a 12 byte fixed point number by another fixed point number.      |
 |                                                                           |
 | Copyright (C) 1992,1994,1995                                              |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Call from C as:                                                           |
 |   void mul32_Xsig(Xsig *x, unsigned b)                                    |
 |                                                                           |
 |   void mul64_Xsig(Xsig *x, unsigned long long *b)                        |
 |                                                                           |
 |   void mul_Xsig_Xsig(Xsig *x, unsigned *b)                                |
 |                                                                           |
 | The result is neither rounded nor normalized, and the ls bit or so may    |
 | be wrong.                                                                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
|
|
.file "mul_Xsig.S"
|
|
|
|
|
|
#include "fpu_emu.h"
|
|
|
|
.text
|
|
ENTRY(mul32_Xsig)
|
|
pushl %ebp
|
|
movl %esp,%ebp
|
|
subl $16,%esp
|
|
pushl %esi
|
|
|
|
movl PARAM1,%esi
|
|
movl PARAM2,%ecx
|
|
|
|
xor %eax,%eax
|
|
movl %eax,-4(%ebp)
|
|
movl %eax,-8(%ebp)
|
|
|
|
movl (%esi),%eax /* lsl of Xsig */
|
|
mull %ecx /* msl of b */
|
|
movl %edx,-12(%ebp)
|
|
|
|
movl 4(%esi),%eax /* midl of Xsig */
|
|
mull %ecx /* msl of b */
|
|
addl %eax,-12(%ebp)
|
|
adcl %edx,-8(%ebp)
|
|
adcl $0,-4(%ebp)
|
|
|
|
movl 8(%esi),%eax /* msl of Xsig */
|
|
mull %ecx /* msl of b */
|
|
addl %eax,-8(%ebp)
|
|
adcl %edx,-4(%ebp)
|
|
|
|
movl -12(%ebp),%eax
|
|
movl %eax,(%esi)
|
|
movl -8(%ebp),%eax
|
|
movl %eax,4(%esi)
|
|
movl -4(%ebp),%eax
|
|
movl %eax,8(%esi)
|
|
|
|
popl %esi
|
|
leave
|
|
ret
|
|
ENDPROC(mul32_Xsig)
|
|
|
|
|
|
ENTRY(mul64_Xsig)
|
|
pushl %ebp
|
|
movl %esp,%ebp
|
|
subl $16,%esp
|
|
pushl %esi
|
|
|
|
movl PARAM1,%esi
|
|
movl PARAM2,%ecx
|
|
|
|
xor %eax,%eax
|
|
movl %eax,-4(%ebp)
|
|
movl %eax,-8(%ebp)
|
|
|
|
movl (%esi),%eax /* lsl of Xsig */
|
|
mull 4(%ecx) /* msl of b */
|
|
movl %edx,-12(%ebp)
|
|
|
|
movl 4(%esi),%eax /* midl of Xsig */
|
|
mull (%ecx) /* lsl of b */
|
|
addl %edx,-12(%ebp)
|
|
adcl $0,-8(%ebp)
|
|
adcl $0,-4(%ebp)
|
|
|
|
movl 4(%esi),%eax /* midl of Xsig */
|
|
mull 4(%ecx) /* msl of b */
|
|
addl %eax,-12(%ebp)
|
|
adcl %edx,-8(%ebp)
|
|
adcl $0,-4(%ebp)
|
|
|
|
movl 8(%esi),%eax /* msl of Xsig */
|
|
mull (%ecx) /* lsl of b */
|
|
addl %eax,-12(%ebp)
|
|
adcl %edx,-8(%ebp)
|
|
adcl $0,-4(%ebp)
|
|
|
|
movl 8(%esi),%eax /* msl of Xsig */
|
|
mull 4(%ecx) /* msl of b */
|
|
addl %eax,-8(%ebp)
|
|
adcl %edx,-4(%ebp)
|
|
|
|
movl -12(%ebp),%eax
|
|
movl %eax,(%esi)
|
|
movl -8(%ebp),%eax
|
|
movl %eax,4(%esi)
|
|
movl -4(%ebp),%eax
|
|
movl %eax,8(%esi)
|
|
|
|
popl %esi
|
|
leave
|
|
ret
|
|
ENDPROC(mul64_Xsig)
|
|
|
|
|
|
|
|
ENTRY(mul_Xsig_Xsig)
|
|
pushl %ebp
|
|
movl %esp,%ebp
|
|
subl $16,%esp
|
|
pushl %esi
|
|
|
|
movl PARAM1,%esi
|
|
movl PARAM2,%ecx
|
|
|
|
xor %eax,%eax
|
|
movl %eax,-4(%ebp)
|
|
movl %eax,-8(%ebp)
|
|
|
|
movl (%esi),%eax /* lsl of Xsig */
|
|
mull 8(%ecx) /* msl of b */
|
|
movl %edx,-12(%ebp)
|
|
|
|
movl 4(%esi),%eax /* midl of Xsig */
|
|
mull 4(%ecx) /* midl of b */
|
|
addl %edx,-12(%ebp)
|
|
adcl $0,-8(%ebp)
|
|
adcl $0,-4(%ebp)
|
|
|
|
movl 8(%esi),%eax /* msl of Xsig */
|
|
mull (%ecx) /* lsl of b */
|
|
addl %edx,-12(%ebp)
|
|
adcl $0,-8(%ebp)
|
|
adcl $0,-4(%ebp)
|
|
|
|
movl 4(%esi),%eax /* midl of Xsig */
|
|
mull 8(%ecx) /* msl of b */
|
|
addl %eax,-12(%ebp)
|
|
adcl %edx,-8(%ebp)
|
|
adcl $0,-4(%ebp)
|
|
|
|
movl 8(%esi),%eax /* msl of Xsig */
|
|
mull 4(%ecx) /* midl of b */
|
|
addl %eax,-12(%ebp)
|
|
adcl %edx,-8(%ebp)
|
|
adcl $0,-4(%ebp)
|
|
|
|
movl 8(%esi),%eax /* msl of Xsig */
|
|
mull 8(%ecx) /* msl of b */
|
|
addl %eax,-8(%ebp)
|
|
adcl %edx,-4(%ebp)
|
|
|
|
movl -12(%ebp),%edx
|
|
movl %edx,(%esi)
|
|
movl -8(%ebp),%edx
|
|
movl %edx,4(%esi)
|
|
movl -4(%ebp),%edx
|
|
movl %edx,8(%esi)
|
|
|
|
popl %esi
|
|
leave
|
|
ret
|
|
ENDPROC(mul_Xsig_Xsig)
|