xtensa: clean up functions in assembly code

Use ENTRY and ENDPROC throughout arch/xtensa/lib assembly sources.
Introduce asm/linkage.h and define xtensa-specific __ALIGN macro there.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
commit 5cf97ebd8b
parent fbb871e220
author Max Filippov <jcmvbkbc@gmail.com> 2017-12-09 21:22:37 -08:00

6 changed files with 36 additions and 36 deletions
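For context: ENTRY() and ENDPROC() come from include/linux/linkage.h. A rough sketch of the generic definitions this commit relies on (ASM_NL is the assembler statement separator, usually ';', and ALIGN resolves to the __ALIGN macro that the new header overrides):

	#define ENTRY(name) \
		.globl name ASM_NL \
		ALIGN ASM_NL \
		name:

	#define END(name) \
		.size name, .-name

	#define ENDPROC(name) \
		.type name, @function ASM_NL \
		END(name)

So ENTRY() reproduces the .global/.align/label prologue that was open-coded in each file, while ENDPROC() additionally emits a .size directive that the hand-written epilogues never did.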

diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h
new file mode 100644
--- /dev/null
+++ b/arch/xtensa/include/asm/linkage.h

@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN		.align 4
+#define __ALIGN_STR	".align 4"
+
+#endif
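Overriding __ALIGN here is what makes ENTRY() usable on xtensa: the generic fallback in include/linux/linkage.h is `.align 4,0x90`, whose 0x90 fill byte is an x86 NOP and meaningless padding on this architecture. With this header in place, ENTRY(memcpy) in the next file expands (ASM_NL separators resolved) to roughly:

	.globl memcpy
	.align 4
	memcpy:

which is exactly the prologue the open-coded versions below spelled out by hand.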

diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S

@@ -9,6 +9,7 @@
  * Copyright (C) 2002 - 2012 Tensilica Inc.
  */
 
+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
 
@@ -108,10 +109,7 @@
 	addi	a5, a5, 2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm
 
-	.align	4
-	.global	memcpy
-	.type	memcpy,@function
-memcpy:
+ENTRY(memcpy)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
@@ -273,14 +271,14 @@ memcpy:
 	s8i	a6, a5, 0
 	retw
 
+ENDPROC(memcpy)
+
 /*
  * void bcopy(const void *src, void *dest, size_t n);
  */
 
-	.align	4
-	.global	bcopy
-	.type	bcopy,@function
-bcopy:
+ENTRY(bcopy)
+
 	entry	sp, 16		# minimal stack frame
 	# a2=src, a3=dst, a4=len
 	mov	a5, a3
@@ -288,6 +286,8 @@ bcopy:
 	mov	a2, a5
 	j	.Lmovecommon	# go to common code for memmove+bcopy
 
+ENDPROC(bcopy)
+
 /*
  * void *memmove(void *dst, const void *src, size_t len);
  *
@@ -376,10 +376,7 @@ bcopy:
 	j	.Lbackdstaligned	# dst is now aligned,
 					# return to main algorithm
 
-	.align	4
-	.global	memmove
-	.type	memmove,@function
-memmove:
+ENTRY(memmove)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
@@ -551,11 +548,4 @@ memmove:
 	s8i	a6, a5, 0
 	retw
 
-/*
- * Local Variables:
- * mode:fundamental
- * comment-start: "# "
- * comment-start-skip: "# *"
- * End:
- */
-
+ENDPROC(memmove)
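One practical effect visible in this file: memcpy, bcopy and memmove now get a proper symbol size. Previously only .global and .type were set by hand and no .size was ever emitted, so tools such as `objdump -t` would report these functions with size 0; with ENDPROC() closing each body the size is computed automatically.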

diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
--- a/arch/xtensa/lib/memset.S
+++ b/arch/xtensa/lib/memset.S

@@ -11,6 +11,7 @@
  * Copyright (C) 2002 Tensilica Inc.
  */
 
+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
 
@@ -30,10 +31,8 @@
  */
 
 .text
-.align	4
-.global	memset
-.type	memset,@function
-memset:
+ENTRY(memset)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ c, a4/ length
 	extui	a3, a3, 0, 8	# mask to just 8 bits
@@ -141,6 +140,7 @@ EX(10f) s8i	a3, a5, 0
 .Lbytesetdone:
 	retw
 
+ENDPROC(memset)
 
 	.section .fixup, "ax"
 	.align	4
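Note the placement here, which repeats in the user-access files below: ENDPROC(memset) sits before the switch to the fixup section. That ordering is required, since END() expands to `.size memset, .-memset`, and the difference expression only assembles while the location counter is still in the same section as the label.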

diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S

@@ -12,6 +12,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
@@ -47,10 +48,8 @@
 #   a12/ tmp
 
 .text
-.align	4
-.global	__strncpy_user
-.type	__strncpy_user,@function
-__strncpy_user:
+ENTRY(__strncpy_user)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
 	mov	a11, a2		# leave dst in return value register
@@ -202,6 +201,7 @@ EX(10f)	s8i	a9, a11, 0
 	sub	a2, a11, a2	# compute strlen
 	retw
 
+ENDPROC(__strncpy_user)
 
 	.section .fixup, "ax"
 	.align	4

diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S

@@ -11,6 +11,7 @@
  * Copyright (C) 2002 Tensilica Inc.
  */
 
+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
 
@@ -42,10 +43,8 @@
 #   a10/ tmp
 
 .text
-.align	4
-.global	__strnlen_user
-.type	__strnlen_user,@function
-__strnlen_user:
+ENTRY(__strnlen_user)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ s, a3/ len
 	addi	a4, a2, -4	# because we overincrement at the end;
@@ -133,6 +132,8 @@ EX(10f)	l32i	a9, a4, 0	# get word with first two bytes of string
 	sub	a2, a4, a2	# subtract to get length
 	retw
 
+ENDPROC(__strnlen_user)
+
 	.section .fixup, "ax"
 	.align	4
 10:

diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S

@@ -53,14 +53,13 @@
  *	a11/ original length
  */
 
+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
 
 	.text
-	.align	4
-	.global	__xtensa_copy_user
-	.type	__xtensa_copy_user,@function
-__xtensa_copy_user:
+ENTRY(__xtensa_copy_user)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
 	mov	a5, a2		# copy dst so that a2 is return value
@@ -267,6 +266,7 @@ EX(10f)	s8i	a6, a5, 0
 	movi	a2, 0		# return success for len bytes copied
 	retw
 
+ENDPROC(__xtensa_copy_user)
 
 	.section .fixup, "ax"
 	.align	4