arch/csky patches for 5.13-rc1

Just 1 feature and 3 fixups.
 -----BEGIN PGP SIGNATURE-----
 
 iQJGBAABCgAwFiEE2KAv+isbWR/viAKHAXH1GYaIxXsFAmCQHXUSHGd1b3JlbkBr
 ZXJuZWwub3JnAAoJEAFx9RmGiMV7QbEP/A5noQq5vO5DcIywFpyjw1Ca+kS8WVKq
 67cQMn+vlaD4RAllWkjPnatg0OkLqvpsp8pJytM4RmmCOuoaMtKQGXiCU01u1t3V
 3CZifEBTg0ZzlliSjsEzaA8RvleLURlkHEcswVILklMESajvqkpHsnTSbkoctrrr
 cD1A1Dwyas5oSrj5Qfdsz52u0jmeY+gmtK8NMQr3rbKBAHdu+D1NpeADnrD1uWs6
 Fcw7qZZ5AmOffQ73JgqPiJ0YWpiP6GBn7pTst8PHrBrG8CR3XYUV+OdbPkiTvznb
 GNYUJ7tum9b7JNEHW7w2EQO0Pz96bbSczddZPrNV06HtC67hKqjsPS2WTDCGF/f7
 wzSgaUyKoHA/F1FVbPJbP7NeGDsqAfwHdnkgR86MKYfP4jE6+zECFvbfxKrmw8Ob
 5Owp9TF3vS7hO5wJA550e79xKdw2fVuvJ5k+ZoKpgQYifRexxIRfXNLANxfCv1sx
 6hYt/NZWsRJSGcZ4cr1BQvp+pQq/zjsP9Z4x+2aKORG9tz2uSaf2T7XkQDkoJxln
 SxvDDYWDRi2khzZ4IGTiavTNC4C3WsBKvfKBYCGYKG1AH8uQQt+cK1+wBQaERmgG
 9AK4mkOWtlw6GPl+rKzum/15toCFZywU0idx19t/2pxWxTTmzAly/Z4xCQant6YY
 PZGmQYwAKKVV
 =jXN4
 -----END PGP SIGNATURE-----

Merge tag 'csky-for-linus-5.13-rc1' of git://github.com/c-sky/csky-linux

Pull arch/csky updates from Guo Ren:
 "Just cleanups"

* tag 'csky-for-linus-5.13-rc1' of git://github.com/c-sky/csky-linux:
  csky: uaccess.h: Coding convention with asm generic
  csky: fix syscache.c fallthrough warning
  csky: Fixup typos
  csky: Remove duplicate include in arch/csky/kernel/entry.S
Linus Torvalds 2021-05-03 12:58:31 -07:00
commit cda689f870
10 changed files with 359 additions and 493 deletions


@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
generic-y += extable.h
generic-y += gpio.h
generic-y += kvm_para.h
generic-y += qrwlock.h
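
The generic-y mechanism: for each entry, kbuild emits a one-line wrapper header under arch/csky/include/generated/asm/, so adding extable.h above pulls in the asm-generic exception-table definitions without an arch-private copy. A sketch of the generated wrapper, assuming the standard scripts/Makefile.asm-generic behavior:

/* arch/csky/include/generated/asm/extable.h -- generated by kbuild */
#include <asm-generic/extable.h>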


@@ -37,7 +37,7 @@ void asid_new_context(struct asid_info *info, atomic64_t *pasid,
* Check the ASID is still valid for the context. If not generate a new ASID.
*
* @pasid: Pointer to the current ASID batch
* @cpu: current CPU ID. Must have been acquired throught get_cpu()
* @cpu: current CPU ID. Must have been acquired through get_cpu()
*/
static inline void asid_check_context(struct asid_info *info,
atomic64_t *pasid, unsigned int cpu,
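
The get_cpu() requirement documented above matters because the ASID check must not migrate off the CPU it samples; get_cpu() disables preemption until the matching put_cpu(). A minimal caller sketch under that contract (the trailing mm argument is an assumption, since the hunk cuts the signature off):

/* Hypothetical call site: keep the task pinned while the ASID is checked. */
unsigned int cpu = get_cpu();           /* disables preemption */
asid_check_context(info, pasid, cpu, mm);
put_cpu();                              /* re-enables preemption */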


@@ -64,7 +64,7 @@
/*
* sync: completion barrier, all sync.xx instructions
* guarantee the last response recieved by bus transaction
* guarantee the last response received by bus transaction
* made by ld/st instructions before sync.s
* sync.s: inherit from sync, but also shareable to other cores
* sync.i: inherit from sync, but also flush cpu pipeline
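
These sync variants back the kernel's generic memory-barrier macros on csky, which is what makes the classic flag/data handoff safe. A sketch using only the portable macros (illustrative, not taken from this patch):

static int payload, ready;

/* Producer */
WRITE_ONCE(payload, 42);
smp_wmb();                      /* payload store visible before the flag */
WRITE_ONCE(ready, 1);

/* Consumer, on another core */
while (!READ_ONCE(ready))
        cpu_relax();
smp_rmb();                      /* flag read ordered before the payload read */
pr_info("%d\n", READ_ONCE(payload));    /* observes 42 */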


@@ -7,11 +7,4 @@ typedef struct {
unsigned long seg;
} mm_segment_t;
#define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF })
#define USER_DS ((mm_segment_t) { PAGE_OFFSET })
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif /* __ASM_CSKY_SEGMENT_H */


@@ -3,122 +3,26 @@
#ifndef __ASM_CSKY_UACCESS_H
#define __ASM_CSKY_UACCESS_H
/*
* User space memory access functions
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/version.h>
#include <asm/segment.h>
#define user_addr_max() \
(uaccess_kernel() ? KERNEL_DS.seg : get_fs().seg)
static inline int access_ok(const void *addr, unsigned long size)
static inline int __access_ok(unsigned long addr, unsigned long size)
{
unsigned long limit = current_thread_info()->addr_limit.seg;
return (((unsigned long)addr < limit) &&
((unsigned long)(addr + size) < limit));
return ((addr < limit) && ((addr + size) < limit));
}
#define __access_ok __access_ok
#define __addr_ok(addr) (access_ok(addr, 0))
/*
* __put_user_fn
*/
extern int __put_user_bad(void);
/*
* Tell gcc we read from memory instead of writing: this is because
* we do not write to any memory gcc knows about, so there are no
* aliasing issues.
*/
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*
* This gets kind of ugly. We want to return _two_ values in "get_user()"
* and yet we don't want to do any pointers, because that is too much
* of a performance impact. Thus we have a few rather ugly macros here,
* and hide all the ugliness from the user.
*
* The "__xxx" versions of the user access functions are versions that
* do not verify the address space, that must have been done previously
* with a separate "access_ok()" call (this is used when we do multiple
* accesses to the same area of user memory).
*
* As we use the same address space for kernel and user data on
* Ckcore, we can just do these as direct assignments. (Of course, the
* exception handling means that it's no longer "just"...)
*/
#define put_user(x, ptr) \
__put_user_check((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __ptr(x) ((unsigned long *)(x))
#define get_user(x, ptr) \
__get_user_check((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user_nocheck(x, ptr, size) \
({ \
long __pu_err = 0; \
typeof(*(ptr)) *__pu_addr = (ptr); \
typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
if (__pu_addr) \
__put_user_size(__pu_val, (__pu_addr), (size), \
__pu_err); \
__pu_err; \
})
#define __put_user_check(x, ptr, size) \
({ \
long __pu_err = -EFAULT; \
typeof(*(ptr)) *__pu_addr = (ptr); \
typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
if (access_ok(__pu_addr, size) && __pu_addr) \
__put_user_size(__pu_val, __pu_addr, (size), __pu_err); \
__pu_err; \
})
#define __put_user_size(x, ptr, size, retval) \
do { \
retval = 0; \
switch (size) { \
case 1: \
__put_user_asm_b(x, ptr, retval); \
break; \
case 2: \
__put_user_asm_h(x, ptr, retval); \
break; \
case 4: \
__put_user_asm_w(x, ptr, retval); \
break; \
case 8: \
__put_user_asm_64(x, ptr, retval); \
break; \
default: \
__put_user_bad(); \
} \
} while (0)
/*
* We don't tell gcc that we are accessing memory, but this is OK
* because we do not write to any memory gcc knows about, so there
* are no aliasing issues.
*
* Note that PC at a fault is the address *after* the faulting
* instruction.
*/
#define __put_user_asm_b(x, ptr, err) \
do { \
int errcode; \
asm volatile( \
__asm__ __volatile__( \
"1: stb %1, (%2,0) \n" \
" br 3f \n" \
"2: mov %0, %3 \n" \
@@ -136,7 +40,7 @@ do { \
#define __put_user_asm_h(x, ptr, err) \
do { \
int errcode; \
asm volatile( \
__asm__ __volatile__( \
"1: sth %1, (%2,0) \n" \
" br 3f \n" \
"2: mov %0, %3 \n" \
@@ -154,7 +58,7 @@ do { \
#define __put_user_asm_w(x, ptr, err) \
do { \
int errcode; \
asm volatile( \
__asm__ __volatile__( \
"1: stw %1, (%2,0) \n" \
" br 3f \n" \
"2: mov %0, %3 \n" \
@@ -169,239 +73,149 @@ do { \
: "memory"); \
} while (0)
#define __put_user_asm_64(x, ptr, err) \
do { \
int tmp; \
int errcode; \
typeof(*(ptr))src = (typeof(*(ptr)))x; \
typeof(*(ptr))*psrc = &src; \
\
asm volatile( \
" ldw %3, (%1, 0) \n" \
"1: stw %3, (%2, 0) \n" \
" ldw %3, (%1, 4) \n" \
"2: stw %3, (%2, 4) \n" \
" br 4f \n" \
"3: mov %0, %4 \n" \
" br 4f \n" \
".section __ex_table, \"a\" \n" \
".align 2 \n" \
".long 1b, 3b \n" \
".long 2b, 3b \n" \
".previous \n" \
"4: \n" \
: "=r"(err), "=r"(psrc), "=r"(ptr), \
"=r"(tmp), "=r"(errcode) \
: "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT) \
: "memory"); \
} while (0)
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err; \
__get_user_size(x, (ptr), (size), __gu_err); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
int __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
if (access_ok(__gu_ptr, size) && __gu_ptr) \
__get_user_size(x, __gu_ptr, size, __gu_err); \
__gu_err; \
})
#define __get_user_size(x, ptr, size, retval) \
do { \
switch (size) { \
case 1: \
__get_user_asm_common((x), ptr, "ldb", retval); \
break; \
case 2: \
__get_user_asm_common((x), ptr, "ldh", retval); \
break; \
case 4: \
__get_user_asm_common((x), ptr, "ldw", retval); \
break; \
default: \
x = 0; \
(retval) = __get_user_bad(); \
} \
} while (0)
#define __get_user_asm_common(x, ptr, ins, err) \
do { \
int errcode; \
asm volatile( \
"1: " ins " %1, (%4,0) \n" \
" br 3f \n" \
/* Fix up codes */ \
"2: mov %0, %2 \n" \
" movi %1, 0 \n" \
" br 3f \n" \
".section __ex_table,\"a\" \n" \
".align 2 \n" \
".long 1b, 2b \n" \
".previous \n" \
"3: \n" \
: "=r"(err), "=r"(x), "=r"(errcode) \
: "0"(0), "r"(ptr), "2"(-EFAULT) \
: "memory"); \
} while (0)
extern int __get_user_bad(void);
#define ___copy_to_user(to, from, n) \
do { \
int w0, w1, w2, w3; \
asm volatile( \
"0: cmpnei %1, 0 \n" \
" bf 8f \n" \
" mov %3, %1 \n" \
" or %3, %2 \n" \
" andi %3, 3 \n" \
" cmpnei %3, 0 \n" \
" bf 1f \n" \
" br 5f \n" \
"1: cmplti %0, 16 \n" /* 4W */ \
" bt 3f \n" \
" ldw %3, (%2, 0) \n" \
" ldw %4, (%2, 4) \n" \
" ldw %5, (%2, 8) \n" \
" ldw %6, (%2, 12) \n" \
"2: stw %3, (%1, 0) \n" \
"9: stw %4, (%1, 4) \n" \
"10: stw %5, (%1, 8) \n" \
"11: stw %6, (%1, 12) \n" \
" addi %2, 16 \n" \
" addi %1, 16 \n" \
" subi %0, 16 \n" \
" br 1b \n" \
"3: cmplti %0, 4 \n" /* 1W */ \
" bt 5f \n" \
" ldw %3, (%2, 0) \n" \
"4: stw %3, (%1, 0) \n" \
" addi %2, 4 \n" \
" addi %1, 4 \n" \
" subi %0, 4 \n" \
" br 3b \n" \
"5: cmpnei %0, 0 \n" /* 1B */ \
" bf 13f \n" \
" ldb %3, (%2, 0) \n" \
"6: stb %3, (%1, 0) \n" \
" addi %2, 1 \n" \
" addi %1, 1 \n" \
" subi %0, 1 \n" \
" br 5b \n" \
"7: subi %0, 4 \n" \
"8: subi %0, 4 \n" \
"12: subi %0, 4 \n" \
" br 13f \n" \
".section __ex_table, \"a\" \n" \
".align 2 \n" \
".long 2b, 13f \n" \
".long 4b, 13f \n" \
".long 6b, 13f \n" \
".long 9b, 12b \n" \
".long 10b, 8b \n" \
".long 11b, 7b \n" \
".previous \n" \
"13: \n" \
: "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \
"=r"(w1), "=r"(w2), "=r"(w3) \
: "0"(n), "1"(to), "2"(from) \
: "memory"); \
} while (0)
#define ___copy_from_user(to, from, n) \
#define __put_user_asm_64(x, ptr, err) \
do { \
int tmp; \
int nsave; \
asm volatile( \
"0: cmpnei %1, 0 \n" \
" bf 7f \n" \
" mov %3, %1 \n" \
" or %3, %2 \n" \
" andi %3, 3 \n" \
" cmpnei %3, 0 \n" \
" bf 1f \n" \
" br 5f \n" \
"1: cmplti %0, 16 \n" \
" bt 3f \n" \
"2: ldw %3, (%2, 0) \n" \
"10: ldw %4, (%2, 4) \n" \
" stw %3, (%1, 0) \n" \
" stw %4, (%1, 4) \n" \
"11: ldw %3, (%2, 8) \n" \
"12: ldw %4, (%2, 12) \n" \
" stw %3, (%1, 8) \n" \
" stw %4, (%1, 12) \n" \
" addi %2, 16 \n" \
" addi %1, 16 \n" \
" subi %0, 16 \n" \
" br 1b \n" \
"3: cmplti %0, 4 \n" \
" bt 5f \n" \
"4: ldw %3, (%2, 0) \n" \
" stw %3, (%1, 0) \n" \
" addi %2, 4 \n" \
" addi %1, 4 \n" \
" subi %0, 4 \n" \
" br 3b \n" \
"5: cmpnei %0, 0 \n" \
" bf 7f \n" \
"6: ldb %3, (%2, 0) \n" \
" stb %3, (%1, 0) \n" \
" addi %2, 1 \n" \
" addi %1, 1 \n" \
" subi %0, 1 \n" \
" br 5b \n" \
"8: stw %3, (%1, 0) \n" \
" subi %0, 4 \n" \
" bf 7f \n" \
"9: subi %0, 8 \n" \
" bf 7f \n" \
"13: stw %3, (%1, 8) \n" \
" subi %0, 12 \n" \
" bf 7f \n" \
".section __ex_table, \"a\" \n" \
".align 2 \n" \
".long 2b, 7f \n" \
".long 4b, 7f \n" \
".long 6b, 7f \n" \
".long 10b, 8b \n" \
".long 11b, 9b \n" \
".long 12b,13b \n" \
".previous \n" \
"7: \n" \
: "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \
"=r"(tmp) \
: "0"(n), "1"(to), "2"(from) \
int errcode; \
\
__asm__ __volatile__( \
" ldw %3, (%1, 0) \n" \
"1: stw %3, (%2, 0) \n" \
" ldw %3, (%1, 4) \n" \
"2: stw %3, (%2, 4) \n" \
" br 4f \n" \
"3: mov %0, %4 \n" \
" br 4f \n" \
".section __ex_table, \"a\" \n" \
".align 2 \n" \
".long 1b, 3b \n" \
".long 2b, 3b \n" \
".previous \n" \
"4: \n" \
: "=r"(err), "=r"(x), "=r"(ptr), \
"=r"(tmp), "=r"(errcode) \
: "0"(err), "1"(x), "2"(ptr), "3"(0), \
"4"(-EFAULT) \
: "memory"); \
} while (0)
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
int retval = 0;
u32 tmp;
switch (size) {
case 1:
tmp = *(u8 *)x;
__put_user_asm_b(tmp, ptr, retval);
break;
case 2:
tmp = *(u16 *)x;
__put_user_asm_h(tmp, ptr, retval);
break;
case 4:
tmp = *(u32 *)x;
__put_user_asm_w(tmp, ptr, retval);
break;
case 8:
__put_user_asm_64(x, (u64 *)ptr, retval);
break;
}
return retval;
}
#define __put_user_fn __put_user_fn
/*
* __get_user_fn
*/
extern int __get_user_bad(void);
#define __get_user_asm_common(x, ptr, ins, err) \
do { \
int errcode; \
__asm__ __volatile__( \
"1: " ins " %1, (%4, 0) \n" \
" br 3f \n" \
"2: mov %0, %2 \n" \
" movi %1, 0 \n" \
" br 3f \n" \
".section __ex_table,\"a\" \n" \
".align 2 \n" \
".long 1b, 2b \n" \
".previous \n" \
"3: \n" \
: "=r"(err), "=r"(x), "=r"(errcode) \
: "0"(0), "r"(ptr), "2"(-EFAULT) \
: "memory"); \
} while (0)
#define __get_user_asm_64(x, ptr, err) \
do { \
int tmp; \
int errcode; \
\
__asm__ __volatile__( \
"1: ldw %3, (%2, 0) \n" \
" stw %3, (%1, 0) \n" \
"2: ldw %3, (%2, 4) \n" \
" stw %3, (%1, 4) \n" \
" br 4f \n" \
"3: mov %0, %4 \n" \
" br 4f \n" \
".section __ex_table, \"a\" \n" \
".align 2 \n" \
".long 1b, 3b \n" \
".long 2b, 3b \n" \
".previous \n" \
"4: \n" \
: "=r"(err), "=r"(x), "=r"(ptr), \
"=r"(tmp), "=r"(errcode) \
: "0"(err), "1"(x), "2"(ptr), "3"(0), \
"4"(-EFAULT) \
: "memory"); \
} while (0)
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
int retval;
u32 tmp;
switch (size) {
case 1:
__get_user_asm_common(tmp, ptr, "ldb", retval);
*(u8 *)x = (u8)tmp;
break;
case 2:
__get_user_asm_common(tmp, ptr, "ldh", retval);
*(u16 *)x = (u16)tmp;
break;
case 4:
__get_user_asm_common(tmp, ptr, "ldw", retval);
*(u32 *)x = (u32)tmp;
break;
case 8:
__get_user_asm_64(x, ptr, retval);
break;
}
return retval;
}
#define __get_user_fn __get_user_fn
unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);
unsigned long clear_user(void *to, unsigned long n);
unsigned long __clear_user(void __user *to, unsigned long n);
#define __clear_user __clear_user
long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
#define __strncpy_from_user __strncpy_from_user
/*
* Return the size of a string (including the ending 0)
*
* Return 0 on exception, a value greater than N if too long
*/
long strnlen_user(const char *src, long n);
long __strnlen_user(const char *s, long n);
#define __strnlen_user __strnlen_user
struct exception_table_entry {
unsigned long insn;
unsigned long nextinsn;
};
extern int fixup_exception(struct pt_regs *regs);
#include <asm/segment.h>
#include <asm-generic/uaccess.h>
#endif /* __ASM_CSKY_UACCESS_H */
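
The net effect of the conversion: the arch header now only defines the primitives (__access_ok, __put_user_fn, __get_user_fn, __clear_user, ...) and lets asm-generic/uaccess.h synthesize get_user()/put_user() on top of them. Roughly how the generic layer consumes __get_user_fn, paraphrased rather than quoted from the asm-generic header:

/* Paraphrase of the asm-generic pattern (not the verbatim header text). */
#define __get_user(x, ptr)                                      \
({                                                              \
        __typeof__(*(ptr)) __gu_val;                            \
        int __gu_err = __get_user_fn(sizeof(*(ptr)), (ptr),     \
                                     &__gu_val);                \
        (x) = __gu_val;                                         \
        __gu_err;                                               \
})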


@@ -16,7 +16,7 @@ struct vdso_data {
* offset of 0, but since the linker must support setting weak undefined
* symbols to the absolute address 0 it also happens to support other low
* addresses even when the code model suggests those low addresses would not
* otherwise be availiable.
* otherwise be available.
*/
#define VDSO_SYMBOL(base, name) \
({ \


@@ -9,7 +9,6 @@
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/thread_info.h>


@@ -7,7 +7,70 @@
unsigned long raw_copy_from_user(void *to, const void *from,
unsigned long n)
{
___copy_from_user(to, from, n);
int tmp, nsave;
__asm__ __volatile__(
"0: cmpnei %1, 0 \n"
" bf 7f \n"
" mov %3, %1 \n"
" or %3, %2 \n"
" andi %3, 3 \n"
" cmpnei %3, 0 \n"
" bf 1f \n"
" br 5f \n"
"1: cmplti %0, 16 \n"
" bt 3f \n"
"2: ldw %3, (%2, 0) \n"
"10: ldw %4, (%2, 4) \n"
" stw %3, (%1, 0) \n"
" stw %4, (%1, 4) \n"
"11: ldw %3, (%2, 8) \n"
"12: ldw %4, (%2, 12) \n"
" stw %3, (%1, 8) \n"
" stw %4, (%1, 12) \n"
" addi %2, 16 \n"
" addi %1, 16 \n"
" subi %0, 16 \n"
" br 1b \n"
"3: cmplti %0, 4 \n"
" bt 5f \n"
"4: ldw %3, (%2, 0) \n"
" stw %3, (%1, 0) \n"
" addi %2, 4 \n"
" addi %1, 4 \n"
" subi %0, 4 \n"
" br 3b \n"
"5: cmpnei %0, 0 \n"
" bf 7f \n"
"6: ldb %3, (%2, 0) \n"
" stb %3, (%1, 0) \n"
" addi %2, 1 \n"
" addi %1, 1 \n"
" subi %0, 1 \n"
" br 5b \n"
"8: stw %3, (%1, 0) \n"
" subi %0, 4 \n"
" bf 7f \n"
"9: subi %0, 8 \n"
" bf 7f \n"
"13: stw %3, (%1, 8) \n"
" subi %0, 12 \n"
" bf 7f \n"
".section __ex_table, \"a\" \n"
".align 2 \n"
".long 2b, 7f \n"
".long 4b, 7f \n"
".long 6b, 7f \n"
".long 10b, 8b \n"
".long 11b, 9b \n"
".long 12b,13b \n"
".previous \n"
"7: \n"
: "=r"(n), "=r"(to), "=r"(from), "=r"(nsave),
"=r"(tmp)
: "0"(n), "1"(to), "2"(from)
: "memory");
return n;
}
EXPORT_SYMBOL(raw_copy_from_user);
@@ -15,48 +78,70 @@ EXPORT_SYMBOL(raw_copy_from_user);
unsigned long raw_copy_to_user(void *to, const void *from,
unsigned long n)
{
___copy_to_user(to, from, n);
int w0, w1, w2, w3;
__asm__ __volatile__(
"0: cmpnei %1, 0 \n"
" bf 8f \n"
" mov %3, %1 \n"
" or %3, %2 \n"
" andi %3, 3 \n"
" cmpnei %3, 0 \n"
" bf 1f \n"
" br 5f \n"
"1: cmplti %0, 16 \n" /* 4W */
" bt 3f \n"
" ldw %3, (%2, 0) \n"
" ldw %4, (%2, 4) \n"
" ldw %5, (%2, 8) \n"
" ldw %6, (%2, 12) \n"
"2: stw %3, (%1, 0) \n"
"9: stw %4, (%1, 4) \n"
"10: stw %5, (%1, 8) \n"
"11: stw %6, (%1, 12) \n"
" addi %2, 16 \n"
" addi %1, 16 \n"
" subi %0, 16 \n"
" br 1b \n"
"3: cmplti %0, 4 \n" /* 1W */
" bt 5f \n"
" ldw %3, (%2, 0) \n"
"4: stw %3, (%1, 0) \n"
" addi %2, 4 \n"
" addi %1, 4 \n"
" subi %0, 4 \n"
" br 3b \n"
"5: cmpnei %0, 0 \n" /* 1B */
" bf 13f \n"
" ldb %3, (%2, 0) \n"
"6: stb %3, (%1, 0) \n"
" addi %2, 1 \n"
" addi %1, 1 \n"
" subi %0, 1 \n"
" br 5b \n"
"7: subi %0, 4 \n"
"8: subi %0, 4 \n"
"12: subi %0, 4 \n"
" br 13f \n"
".section __ex_table, \"a\" \n"
".align 2 \n"
".long 2b, 13f \n"
".long 4b, 13f \n"
".long 6b, 13f \n"
".long 9b, 12b \n"
".long 10b, 8b \n"
".long 11b, 7b \n"
".previous \n"
"13: \n"
: "=r"(n), "=r"(to), "=r"(from), "=r"(w0),
"=r"(w1), "=r"(w2), "=r"(w3)
: "0"(n), "1"(to), "2"(from)
: "memory");
return n;
}
EXPORT_SYMBOL(raw_copy_to_user);
/*
* copy a null terminated string from userspace.
*/
#define __do_strncpy_from_user(dst, src, count, res) \
do { \
int tmp; \
long faultres; \
asm volatile( \
" cmpnei %3, 0 \n" \
" bf 4f \n" \
"1: cmpnei %1, 0 \n" \
" bf 5f \n" \
"2: ldb %4, (%3, 0) \n" \
" stb %4, (%2, 0) \n" \
" cmpnei %4, 0 \n" \
" bf 3f \n" \
" addi %3, 1 \n" \
" addi %2, 1 \n" \
" subi %1, 1 \n" \
" br 1b \n" \
"3: subu %0, %1 \n" \
" br 5f \n" \
"4: mov %0, %5 \n" \
" br 5f \n" \
".section __ex_table, \"a\" \n" \
".align 2 \n" \
".long 2b, 4b \n" \
".previous \n" \
"5: \n" \
: "=r"(res), "=r"(count), "=r"(dst), \
"=r"(src), "=r"(tmp), "=r"(faultres) \
: "5"(-EFAULT), "0"(count), "1"(count), \
"2"(dst), "3"(src) \
: "memory", "cc"); \
} while (0)
/*
* __strncpy_from_user: - Copy a NUL terminated string from userspace,
* with less checking.
@@ -80,41 +165,41 @@ do { \
*/
long __strncpy_from_user(char *dst, const char *src, long count)
{
long res;
long res, faultres;
int tmp;
__asm__ __volatile__(
" cmpnei %3, 0 \n"
" bf 4f \n"
"1: cmpnei %1, 0 \n"
" bf 5f \n"
"2: ldb %4, (%3, 0) \n"
" stb %4, (%2, 0) \n"
" cmpnei %4, 0 \n"
" bf 3f \n"
" addi %3, 1 \n"
" addi %2, 1 \n"
" subi %1, 1 \n"
" br 1b \n"
"3: subu %0, %1 \n"
" br 5f \n"
"4: mov %0, %5 \n"
" br 5f \n"
".section __ex_table, \"a\" \n"
".align 2 \n"
".long 2b, 4b \n"
".previous \n"
"5: \n"
: "=r"(res), "=r"(count), "=r"(dst),
"=r"(src), "=r"(tmp), "=r"(faultres)
: "5"(-EFAULT), "0"(count), "1"(count),
"2"(dst), "3"(src)
: "memory");
__do_strncpy_from_user(dst, src, count, res);
return res;
}
EXPORT_SYMBOL(__strncpy_from_user);
/*
* strncpy_from_user: - Copy a NUL terminated string from userspace.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
* Copies a NUL-terminated string from userspace to kernel space.
*
* On success, returns the length of the string (not including the trailing
* NUL).
*
* If access to userspace fails, returns -EFAULT (some data may have been
* copied).
*
* If @count is smaller than the length of the string, copies @count bytes
* and returns @count.
*/
long strncpy_from_user(char *dst, const char *src, long count)
{
long res = -EFAULT;
if (access_ok(src, 1))
__do_strncpy_from_user(dst, src, count, res);
return res;
}
EXPORT_SYMBOL(strncpy_from_user);
/*
* strnlen_user: - Get the size of a string in user space.
* @str: The string to measure.
@@ -126,14 +211,11 @@ EXPORT_SYMBOL(strncpy_from_user);
* On exception, returns 0.
* If the string is too long, returns a value greater than @n.
*/
long strnlen_user(const char *s, long n)
long __strnlen_user(const char *s, long n)
{
unsigned long res, tmp;
if (s == NULL)
return 0;
asm volatile(
__asm__ __volatile__(
" cmpnei %1, 0 \n"
" bf 3f \n"
"1: cmpnei %0, 0 \n"
@@ -156,87 +238,11 @@ long strnlen_user(const char *s, long n)
"5: \n"
: "=r"(n), "=r"(s), "=r"(res), "=r"(tmp)
: "0"(n), "1"(s), "2"(n)
: "memory", "cc");
: "memory");
return res;
}
EXPORT_SYMBOL(strnlen_user);
#define __do_clear_user(addr, size) \
do { \
int __d0, zvalue, tmp; \
\
asm volatile( \
"0: cmpnei %1, 0 \n" \
" bf 7f \n" \
" mov %3, %1 \n" \
" andi %3, 3 \n" \
" cmpnei %3, 0 \n" \
" bf 1f \n" \
" br 5f \n" \
"1: cmplti %0, 32 \n" /* 4W */ \
" bt 3f \n" \
"8: stw %2, (%1, 0) \n" \
"10: stw %2, (%1, 4) \n" \
"11: stw %2, (%1, 8) \n" \
"12: stw %2, (%1, 12) \n" \
"13: stw %2, (%1, 16) \n" \
"14: stw %2, (%1, 20) \n" \
"15: stw %2, (%1, 24) \n" \
"16: stw %2, (%1, 28) \n" \
" addi %1, 32 \n" \
" subi %0, 32 \n" \
" br 1b \n" \
"3: cmplti %0, 4 \n" /* 1W */ \
" bt 5f \n" \
"4: stw %2, (%1, 0) \n" \
" addi %1, 4 \n" \
" subi %0, 4 \n" \
" br 3b \n" \
"5: cmpnei %0, 0 \n" /* 1B */ \
"9: bf 7f \n" \
"6: stb %2, (%1, 0) \n" \
" addi %1, 1 \n" \
" subi %0, 1 \n" \
" br 5b \n" \
".section __ex_table,\"a\" \n" \
".align 2 \n" \
".long 8b, 9b \n" \
".long 10b, 9b \n" \
".long 11b, 9b \n" \
".long 12b, 9b \n" \
".long 13b, 9b \n" \
".long 14b, 9b \n" \
".long 15b, 9b \n" \
".long 16b, 9b \n" \
".long 4b, 9b \n" \
".long 6b, 9b \n" \
".previous \n" \
"7: \n" \
: "=r"(size), "=r" (__d0), \
"=r"(zvalue), "=r"(tmp) \
: "0"(size), "1"(addr), "2"(0) \
: "memory", "cc"); \
} while (0)
/*
* clear_user: - Zero a block of memory in user space.
* @to: Destination address, in user space.
* @n: Number of bytes to zero.
*
* Zero a block of memory in user space.
*
* Returns number of bytes that could not be cleared.
* On success, this will be zero.
*/
unsigned long
clear_user(void __user *to, unsigned long n)
{
if (access_ok(to, n))
__do_clear_user(to, n);
return n;
}
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__strnlen_user);
/*
* __clear_user: - Zero a block of memory in user space, with less checking.
@@ -252,7 +258,59 @@ EXPORT_SYMBOL(clear_user);
unsigned long
__clear_user(void __user *to, unsigned long n)
{
__do_clear_user(to, n);
int data, value, tmp;
__asm__ __volatile__(
"0: cmpnei %1, 0 \n"
" bf 7f \n"
" mov %3, %1 \n"
" andi %3, 3 \n"
" cmpnei %3, 0 \n"
" bf 1f \n"
" br 5f \n"
"1: cmplti %0, 32 \n" /* 4W */
" bt 3f \n"
"8: stw %2, (%1, 0) \n"
"10: stw %2, (%1, 4) \n"
"11: stw %2, (%1, 8) \n"
"12: stw %2, (%1, 12) \n"
"13: stw %2, (%1, 16) \n"
"14: stw %2, (%1, 20) \n"
"15: stw %2, (%1, 24) \n"
"16: stw %2, (%1, 28) \n"
" addi %1, 32 \n"
" subi %0, 32 \n"
" br 1b \n"
"3: cmplti %0, 4 \n" /* 1W */
" bt 5f \n"
"4: stw %2, (%1, 0) \n"
" addi %1, 4 \n"
" subi %0, 4 \n"
" br 3b \n"
"5: cmpnei %0, 0 \n" /* 1B */
"9: bf 7f \n"
"6: stb %2, (%1, 0) \n"
" addi %1, 1 \n"
" subi %0, 1 \n"
" br 5b \n"
".section __ex_table,\"a\" \n"
".align 2 \n"
".long 8b, 9b \n"
".long 10b, 9b \n"
".long 11b, 9b \n"
".long 12b, 9b \n"
".long 13b, 9b \n"
".long 14b, 9b \n"
".long 15b, 9b \n"
".long 16b, 9b \n"
".long 4b, 9b \n"
".long 6b, 9b \n"
".previous \n"
"7: \n"
: "=r"(n), "=r" (data), "=r"(value), "=r"(tmp)
: "0"(n), "1"(to), "2"(0)
: "memory");
return n;
}
EXPORT_SYMBOL(__clear_user);
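
These raw routines are normally reached through the checked wrappers in linux/uaccess.h, which perform access_ok() and then dispatch here. A typical (hypothetical) call site:

/* Hypothetical caller: copy_from_user() = access_ok() + raw_copy_from_user(). */
static long example_handler(const char __user *ubuf, size_t len)
{
        char kbuf[64];

        if (len > sizeof(kbuf))
                return -EINVAL;
        if (copy_from_user(kbuf, ubuf, len))    /* returns bytes NOT copied */
                return -EFAULT;
        return 0;
}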


@@ -12,7 +12,7 @@ int fixup_exception(struct pt_regs *regs)
fixup = search_exception_tables(instruction_pointer(regs));
if (fixup) {
regs->pc = fixup->nextinsn;
regs->pc = fixup->fixup;
return 1;
}
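
The rename follows the asm-generic exception_table_entry layout, where the landing-address field is called fixup (csky's old private struct, removed from uaccess.h above, called it nextinsn). The mechanism each entry encodes:

/* asm-generic/extable.h layout: one entry per potentially faulting insn. */
struct exception_table_entry {
        unsigned long insn;     /* address of the instruction that may fault */
        unsigned long fixup;    /* address to resume at if it does */
};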


@@ -17,6 +17,7 @@ SYSCALL_DEFINE3(cacheflush,
flush_icache_mm_range(current->mm,
(unsigned long)addr,
(unsigned long)addr + bytes);
fallthrough;
case DCACHE:
dcache_wb_range((unsigned long)addr,
(unsigned long)addr + bytes);
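
fallthrough is the kernel pseudo-keyword from linux/compiler_attributes.h; where the compiler supports it, it expands to __attribute__((__fallthrough__)) and tells -Wimplicit-fallthrough that dropping into the next case is deliberate, with no change in generated code. The shape of the fixed switch (the case label before the icache flush is not shown in the hunk, so ICACHE here is an assumption):

switch (cache) {
case ICACHE:            /* assumed label; not visible in the hunk */
        flush_icache_mm_range(current->mm,
                              (unsigned long)addr,
                              (unsigned long)addr + bytes);
        fallthrough;    /* deliberate: also write back the dcache */
case DCACHE:
        dcache_wb_range((unsigned long)addr,
                        (unsigned long)addr + bytes);
        break;
}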