From fd1d362600e2d2edb6262d8e05661424c1a315bf Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Fri, 8 Sep 2017 16:14:00 -0700
Subject: [PATCH] ARM: implement memset32 & memset64

Reuse the existing optimised memset implementation to implement an
optimised memset32 and memset64.

Link: http://lkml.kernel.org/r/20170720184539.31609-5-willy@infradead.org
Signed-off-by: Matthew Wilcox
Reviewed-by: Russell King
Cc: "H. Peter Anvin"
Cc: "James E.J. Bottomley"
Cc: "Martin K. Petersen"
Cc: David Miller
Cc: Ingo Molnar
Cc: Ivan Kokshaysky
Cc: Matt Turner
Cc: Michael Ellerman
Cc: Minchan Kim
Cc: Ralf Baechle
Cc: Richard Henderson
Cc: Sam Ravnborg
Cc: Sergey Senozhatsky
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/include/asm/string.h | 14 ++++++++++++++
 arch/arm/kernel/armksyms.c    |  2 ++
 arch/arm/lib/memset.S         | 24 ++++++++++++++++++------
 3 files changed, 34 insertions(+), 6 deletions(-)

diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index cf4f3aad0fc1..fe1c6af3a1b1 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -24,6 +24,20 @@ extern void * memchr(const void *, int, __kernel_size_t);
 #define __HAVE_ARCH_MEMSET
 extern void * memset(void *, int, __kernel_size_t);
 
+#define __HAVE_ARCH_MEMSET32
+extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
+static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
+{
+	return __memset32(p, v, n * 4);
+}
+
+#define __HAVE_ARCH_MEMSET64
+extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi);
+static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+{
+	return __memset64(p, v, n * 8, v >> 32);
+}
+
 extern void __memzero(void *ptr, __kernel_size_t n);
 
 #define memset(p,v,n)							\
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 8e8d20cdbce7..5266fd9ad6b4 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -87,6 +87,8 @@ EXPORT_SYMBOL(__raw_writesl);
 EXPORT_SYMBOL(strchr);
 EXPORT_SYMBOL(strrchr);
 EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(__memset32);
+EXPORT_SYMBOL(__memset64);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memchr);
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 3c65e3bd790f..ed6d35d9cdb5 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -28,7 +28,7 @@ UNWIND( .fnstart )
 1:	orr	r1, r1, r1, lsl #8
 	orr	r1, r1, r1, lsl #16
 	mov	r3, r1
-	cmp	r2, #16
+7:	cmp	r2, #16
 	blt	4f
 
 #if ! CALGN(1)+0
@@ -41,7 +41,7 @@ UNWIND( .fnend )
 UNWIND( .fnstart )
 UNWIND( .save {r8, lr} )
 	mov	r8, r1
-	mov	lr, r1
+	mov	lr, r3
 
 2:	subs	r2, r2, #64
 	stmgeia	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
@@ -73,11 +73,11 @@ UNWIND( .fnend )
 UNWIND( .fnstart )
 UNWIND( .save {r4-r8, lr} )
 	mov	r4, r1
-	mov	r5, r1
+	mov	r5, r3
 	mov	r6, r1
-	mov	r7, r1
+	mov	r7, r3
 	mov	r8, r1
-	mov	lr, r1
+	mov	lr, r3
 
 	cmp	r2, #96
 	tstgt	ip, #31
@@ -114,7 +114,7 @@ UNWIND( .fnstart )
 	tst	r2, #4
 	strne	r1, [ip], #4
 /*
- * When we get here, we've got less than 4 bytes to zero.  We
+ * When we get here, we've got less than 4 bytes to set.  We
  * may have an unaligned pointer as well.
 */
 5:	tst	r2, #2
@@ -135,3 +135,15 @@ UNWIND( .fnstart )
 UNWIND( .fnend )
 ENDPROC(memset)
 ENDPROC(mmioset)
+
+ENTRY(__memset32)
+UNWIND( .fnstart )
+	mov	r3, r1			@ copy r1 to r3 and fall into memset64
+UNWIND( .fnend )
+ENDPROC(__memset32)
+ENTRY(__memset64)
+UNWIND( .fnstart )
+	mov	ip, r0			@ preserve r0 as return value
+	b	7b			@ jump into the middle of memset
+UNWIND( .fnend )
+ENDPROC(__memset64)
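
For reference, the contract the new calls provide: memset32() and
memset64() behave like memset() except that they splat a 32-bit or
64-bit pattern and take a count of elements rather than bytes.  The
sketch below is a minimal userspace model of that contract, handy for
checking the assembly against; the ref_* names, buffer sizes and fill
values are invented for illustration and are not part of the kernel API.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Reference model of memset32()/memset64(): fill n *elements*
 * (not bytes) with the pattern v and return the destination
 * pointer, just as memset() returns its first argument.
 */
static void *ref_memset32(uint32_t *p, uint32_t v, size_t n)
{
	uint32_t *s = p;

	while (n--)
		*s++ = v;
	return p;
}

static void *ref_memset64(uint64_t *p, uint64_t v, size_t n)
{
	uint64_t *s = p;

	while (n--)
		*s++ = v;
	return p;
}

int main(void)
{
	uint32_t pixels[8];	/* e.g. one RGBA value across a scanline */
	uint64_t ptes[4];	/* e.g. one template entry across a table */

	ref_memset32(pixels, 0xff00ff00u, 8);		/* 8 * 4 = 32 bytes */
	ref_memset64(ptes, 0xdeadbeefcafef00dULL, 4);	/* 4 * 8 = 32 bytes */

	printf("%08x %016llx\n", (unsigned)pixels[7],
	       (unsigned long long)ptes[3]);
	return 0;
}

The otherwise odd four-argument __memset64() prototype is what lets the
assembly fall straight through: under the 32-bit ARM calling convention
the arguments land in r0 (pointer), r1 (low word), r2 (byte count) and
r3 (high word), and r1/r3 are exactly the register pair the memset
store loops already interleave (stmgeia ip!, {r1, r3, r8, lr}).  So
__memset64 only has to preserve r0 in ip and branch to the new label 7,
skipping the byte-splatting at the top of memset, while __memset32
first duplicates r1 into r3 so that both words of the pattern match.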