Merge branch 'uaccess' (batched user access infrastructure)

Expose an interface to allow users to mark several accesses together as
being user space accesses, allowing batching of the surrounding user
space access markers (SMAP on x86, PAN on arm64, domain register
switching on arm).

This is currently only used for the user string length and copying
functions, where the SMAP overhead on x86 drowned the actual user
accesses (only noticeable on newer microarchitectures that support SMAP
in the first place, of course).

* user access batching branch:
  Use the new batched user accesses in generic user string handling
  Add 'unsafe' user access functions for batched accesses
  x86: reorganize SMAP handling in user space accesses
commit 404a47410c
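What the new interface looks like in use, as a hedged sketch (this function is illustrative, not code from this commit): the caller does the access_ok() check once, opens a single batched window, and uses the 'unsafe' accessors inside it, so a SMAP-capable x86 CPU executes one STAC/CLAC pair around the whole loop instead of one pair per access. The name copy_bytes_from_user is hypothetical; access_ok() takes the VERIFY_READ argument in kernels of this era.

#include <linux/uaccess.h>

/* Illustrative only: byte copy under one batched user-access window. */
static long copy_bytes_from_user(char *dst, const char __user *src, long n)
{
	long i;

	if (!access_ok(VERIFY_READ, src, n))	/* mandatory pre-check */
		return -EFAULT;

	user_access_begin();			/* stac() on SMAP-capable x86 */
	for (i = 0; i < n; i++) {
		char c;

		if (unlikely(unsafe_get_user(c, src + i))) {
			user_access_end();	/* close window on the error path too */
			return -EFAULT;
		}
		dst[i] = c;
	}
	user_access_end();			/* clac() */
	return 0;
}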
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -134,6 +134,9 @@ extern int __get_user_4(void);
 extern int __get_user_8(void);
 extern int __get_user_bad(void);
 
+#define __uaccess_begin() stac()
+#define __uaccess_end()   clac()
+
 /*
  * This is a type: either unsigned long, if the argument fits into
  * that type, or otherwise unsigned long long.
@@ -193,10 +196,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
 #ifdef CONFIG_X86_32
 #define __put_user_asm_u64(x, addr, err, errret) \
-	asm volatile(ASM_STAC "\n" \
+	asm volatile("\n" \
		     "1:	movl %%eax,0(%2)\n" \
		     "2:	movl %%edx,4(%2)\n" \
-		     "3: " ASM_CLAC "\n" \
+		     "3:" \
		     ".section .fixup,\"ax\"\n" \
		     "4:	movl %3,%0\n" \
		     "	jmp 3b\n" \
@@ -207,10 +210,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
	     : "A" (x), "r" (addr), "i" (errret), "0" (err))
 
 #define __put_user_asm_ex_u64(x, addr) \
-	asm volatile(ASM_STAC "\n" \
+	asm volatile("\n" \
		     "1:	movl %%eax,0(%1)\n" \
		     "2:	movl %%edx,4(%1)\n" \
-		     "3: " ASM_CLAC "\n" \
+		     "3:" \
		     _ASM_EXTABLE_EX(1b, 2b) \
		     _ASM_EXTABLE_EX(2b, 3b) \
		     : : "A" (x), "r" (addr))
@@ -304,6 +307,10 @@ do { \
	} \
 } while (0)
 
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
 #define __put_user_size_ex(x, ptr, size) \
 do { \
	__chk_user_ptr(ptr); \
@@ -358,9 +365,9 @@ do { \
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-	asm volatile(ASM_STAC "\n" \
+	asm volatile("\n" \
		     "1:	mov"itype" %2,%"rtype"1\n" \
-		     "2: " ASM_CLAC "\n" \
+		     "2:\n" \
		     ".section .fixup,\"ax\"\n" \
		     "3:	mov %3,%0\n" \
		     "	xor"itype" %"rtype"1,%"rtype"1\n" \
@@ -370,6 +377,10 @@ do { \
	     : "=r" (err), ltype(x) \
	     : "m" (__m(addr)), "i" (errret), "0" (err))
 
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
 #define __get_user_size_ex(x, ptr, size) \
 do { \
	__chk_user_ptr(ptr); \
@@ -400,7 +411,9 @@ do { \
 #define __put_user_nocheck(x, ptr, size) \
 ({ \
	int __pu_err; \
+	__uaccess_begin(); \
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+	__uaccess_end(); \
	__builtin_expect(__pu_err, 0); \
 })
 
@@ -408,7 +421,9 @@ do { \
 ({ \
	int __gu_err; \
	unsigned long __gu_val; \
+	__uaccess_begin(); \
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+	__uaccess_end(); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	__builtin_expect(__gu_err, 0); \
 })
@@ -423,9 +438,9 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-	asm volatile(ASM_STAC "\n" \
+	asm volatile("\n" \
		     "1:	mov"itype" %"rtype"1,%2\n" \
-		     "2: " ASM_CLAC "\n" \
+		     "2:\n" \
		     ".section .fixup,\"ax\"\n" \
		     "3:	mov %3,%0\n" \
		     "	jmp 2b\n" \
@@ -445,11 +460,11 @@ struct __large_struct { unsigned long buf[100]; };
  */
 #define uaccess_try	do { \
	current_thread_info()->uaccess_err = 0; \
-	stac(); \
+	__uaccess_begin(); \
	barrier();
 
 #define uaccess_catch(err) \
-	clac(); \
+	__uaccess_end(); \
	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
 } while (0)
 
@@ -547,12 +562,13 @@ extern void __cmpxchg_wrong_size(void)
	__typeof__(ptr) __uval = (uval); \
	__typeof__(*(ptr)) __old = (old); \
	__typeof__(*(ptr)) __new = (new); \
+	__uaccess_begin(); \
	switch (size) { \
	case 1: \
	{ \
-		asm volatile("\t" ASM_STAC "\n" \
+		asm volatile("\n" \
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
-			"2:\t" ASM_CLAC "\n" \
+			"2:\n" \
			"\t.section .fixup, \"ax\"\n" \
			"3:\tmov %3, %0\n" \
			"\tjmp 2b\n" \
@@ -566,9 +582,9 @@ extern void __cmpxchg_wrong_size(void)
	} \
	case 2: \
	{ \
-		asm volatile("\t" ASM_STAC "\n" \
+		asm volatile("\n" \
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
-			"2:\t" ASM_CLAC "\n" \
+			"2:\n" \
			"\t.section .fixup, \"ax\"\n" \
			"3:\tmov %3, %0\n" \
			"\tjmp 2b\n" \
@@ -582,9 +598,9 @@ extern void __cmpxchg_wrong_size(void)
	} \
	case 4: \
	{ \
-		asm volatile("\t" ASM_STAC "\n" \
+		asm volatile("\n" \
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
-			"2:\t" ASM_CLAC "\n" \
+			"2:\n" \
			"\t.section .fixup, \"ax\"\n" \
			"3:\tmov %3, %0\n" \
			"\tjmp 2b\n" \
@@ -601,9 +617,9 @@ extern void __cmpxchg_wrong_size(void)
		if (!IS_ENABLED(CONFIG_X86_64)) \
			__cmpxchg_wrong_size(); \
 \
-		asm volatile("\t" ASM_STAC "\n" \
+		asm volatile("\n" \
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
-			"2:\t" ASM_CLAC "\n" \
+			"2:\n" \
			"\t.section .fixup, \"ax\"\n" \
			"3:\tmov %3, %0\n" \
			"\tjmp 2b\n" \
@@ -618,6 +634,7 @@ extern void __cmpxchg_wrong_size(void)
	default: \
		__cmpxchg_wrong_size(); \
	} \
+	__uaccess_end(); \
	*__uval = __old; \
	__ret; \
 })
@@ -754,5 +771,30 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
  */
 #define __copy_from_user_nmi __copy_from_user_inatomic
 
+/*
+ * The "unsafe" user accesses aren't really "unsafe", but the naming
+ * is a big fat warning: you have to not only do the access_ok()
+ * checking before using them, but you have to surround them with the
+ * user_access_begin/end() pair.
+ */
+#define user_access_begin()	__uaccess_begin()
+#define user_access_end()	__uaccess_end()
+
+#define unsafe_put_user(x, ptr) \
+({ \
+	int __pu_err; \
+	__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
+	__builtin_expect(__pu_err, 0); \
+})
+
+#define unsafe_get_user(x, ptr) \
+({ \
+	int __gu_err; \
+	unsigned long __gu_val; \
+	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
+	(x) = (__force __typeof__(*(ptr)))__gu_val; \
+	__builtin_expect(__gu_err, 0); \
+})
+
 #endif /* _ASM_X86_UACCESS_H */
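A minimal usage sketch of the macros just added above (the function name put_word is hypothetical, not from the commit): the comment's "big fat warning" in practice means both the access_ok() check and the begin/end bracket are the caller's responsibility, since unsafe_put_user() itself no longer toggles SMAP.

#include <linux/uaccess.h>

static int put_word(u32 __user *uaddr, u32 val)
{
	int err;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
		return -EFAULT;

	user_access_begin();	/* stac() */
	err = unsafe_put_user(val, uaddr);
	user_access_end();	/* clac() */
	return err;
}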
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -56,35 +56,49 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
-	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+	case 1:
+		__uaccess_begin();
+		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
+		__uaccess_end();
		return ret;
-	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+	case 2:
+		__uaccess_begin();
+		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
+		__uaccess_end();
		return ret;
-	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+	case 4:
+		__uaccess_begin();
+		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
+		__uaccess_end();
		return ret;
-	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+	case 8:
+		__uaccess_begin();
+		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
+		__uaccess_end();
		return ret;
	case 10:
+		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
-		if (unlikely(ret))
-			return ret;
-		__get_user_asm(*(u16 *)(8 + (char *)dst),
-			       (u16 __user *)(8 + (char __user *)src),
-			       ret, "w", "w", "=r", 2);
+		if (likely(!ret))
+			__get_user_asm(*(u16 *)(8 + (char *)dst),
+				       (u16 __user *)(8 + (char __user *)src),
+				       ret, "w", "w", "=r", 2);
+		__uaccess_end();
		return ret;
	case 16:
+		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
-		if (unlikely(ret))
-			return ret;
-		__get_user_asm(*(u64 *)(8 + (char *)dst),
-			       (u64 __user *)(8 + (char __user *)src),
-			       ret, "q", "", "=r", 8);
+		if (likely(!ret))
+			__get_user_asm(*(u64 *)(8 + (char *)dst),
+				       (u64 __user *)(8 + (char __user *)src),
+				       ret, "q", "", "=r", 8);
+		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
@@ -106,35 +120,51 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
-	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+	case 1:
+		__uaccess_begin();
+		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
+		__uaccess_end();
		return ret;
-	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+	case 2:
+		__uaccess_begin();
+		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
+		__uaccess_end();
		return ret;
-	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+	case 4:
+		__uaccess_begin();
+		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
+		__uaccess_end();
		return ret;
-	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+	case 8:
+		__uaccess_begin();
+		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
+		__uaccess_end();
		return ret;
	case 10:
+		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
-		if (unlikely(ret))
-			return ret;
-		asm("":::"memory");
-		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
-			       ret, "w", "w", "ir", 2);
+		if (likely(!ret)) {
+			asm("":::"memory");
+			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+				       ret, "w", "w", "ir", 2);
+		}
+		__uaccess_end();
		return ret;
	case 16:
+		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
-		if (unlikely(ret))
-			return ret;
-		asm("":::"memory");
-		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
-			       ret, "q", "", "er", 8);
+		if (likely(!ret)) {
+			asm("":::"memory");
+			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+				       ret, "q", "", "er", 8);
+		}
+		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
@@ -160,39 +190,47 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
	switch (size) {
	case 1: {
		u8 tmp;
+		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
+		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
+		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
+		__uaccess_end();
		return ret;
	}
 
	case 4: {
		u32 tmp;
+		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
+		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
+		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
+		__uaccess_end();
		return ret;
	}
	default:
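Note the control-flow change in the multi-word cases above: the old code could "return ret" early because each __get_user_asm()/__put_user_asm() cleaned up after itself, but once __uaccess_begin() opens a window, every path must reach __uaccess_end(), so the error paths now fall through via likely(!ret) instead of returning with SMAP suppression still active. The general shape, as a sketch with hypothetical first_access()/second_access() helpers:

	__uaccess_begin();
	first_access();			/* may set ret */
	if (likely(!ret))
		second_access();	/* only attempted if the first half succeeded */
	__uaccess_end();		/* single exit: clac() always runs */
	return ret;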
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -111,4 +111,11 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #define probe_kernel_address(addr, retval) \
	probe_kernel_read(&retval, addr, sizeof(retval))
 
+#ifndef user_access_begin
+#define user_access_begin() do { } while (0)
+#define user_access_end() do { } while (0)
+#define unsafe_get_user(x, ptr) __get_user(x, ptr)
+#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#endif
+
 #endif		/* __LINUX_UACCESS_H__ */
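The #ifndef fallback above is what lets generic code use the batched API unconditionally: on architectures that don't provide their own batching, the window markers compile away and the unsafe accessors alias back to __get_user()/__put_user(). A hedged portable-caller sketch (sum_two_words is hypothetical and not from the commit; the goto gives the single exit the API wants):

#include <linux/uaccess.h>

static long sum_two_words(const unsigned long __user *p, unsigned long *out)
{
	unsigned long a, b;
	long ret = -EFAULT;

	if (!access_ok(VERIFY_READ, p, 2 * sizeof(*p)))
		return ret;

	user_access_begin();		/* no-op unless the arch overrides it */
	if (unlikely(unsafe_get_user(a, p)))
		goto out;
	if (unlikely(unsafe_get_user(b, p + 1)))
		goto out;
	*out = a + b;
	ret = 0;
out:
	user_access_end();		/* likewise a no-op on non-batching arches */
	return ret;
}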
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -39,7 +39,7 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
		unsigned long c, data;
 
		/* Fall back to byte-at-a-time if we get a page fault */
-		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+		if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
			break;
		*(unsigned long *)(dst+res) = c;
		if (has_zero(c, &data, &constants)) {
@@ -55,7 +55,7 @@ byte_at_a_time:
	while (max) {
		char c;
 
-		if (unlikely(__get_user(c,src+res)))
+		if (unlikely(unsafe_get_user(c,src+res)))
			return -EFAULT;
		dst[res] = c;
		if (!c)
@@ -107,7 +107,12 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
	src_addr = (unsigned long)src;
	if (likely(src_addr < max_addr)) {
		unsigned long max = max_addr - src_addr;
-		return do_strncpy_from_user(dst, src, count, max);
+		long retval;
+
+		user_access_begin();
+		retval = do_strncpy_from_user(dst, src, count, max);
+		user_access_end();
+		return retval;
	}
	return -EFAULT;
 }
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -45,7 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
	src -= align;
	max += align;
 
-	if (unlikely(__get_user(c,(unsigned long __user *)src)))
+	if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
		return 0;
	c |= aligned_byte_mask(align);
 
@@ -61,7 +61,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
		if (unlikely(max <= sizeof(unsigned long)))
			break;
		max -= sizeof(unsigned long);
-		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+		if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
			return 0;
	}
	res -= align;
@@ -112,7 +112,12 @@ long strnlen_user(const char __user *str, long count)
	src_addr = (unsigned long)str;
	if (likely(src_addr < max_addr)) {
		unsigned long max = max_addr - src_addr;
-		return do_strnlen_user(str, count, max);
+		long retval;
+
+		user_access_begin();
+		retval = do_strnlen_user(str, count, max);
+		user_access_end();
+		return retval;
	}
	return 0;
 }
@@ -141,7 +146,12 @@ long strlen_user(const char __user *str)
	src_addr = (unsigned long)str;
	if (likely(src_addr < max_addr)) {
		unsigned long max = max_addr - src_addr;
-		return do_strnlen_user(str, ~0ul, max);
+		long retval;
+
+		user_access_begin();
+		retval = do_strnlen_user(str, ~0ul, max);
+		user_access_end();
+		return retval;
	}
	return 0;
 }