arm64 fix for 5.4
- Ensure PAN is re-enabled following user fault in uaccess routines

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fix from Will Deacon:
 "Ensure PAN is re-enabled following user fault in uaccess routines.

  After I thought we were done for 5.4, we had a report this week of a nasty issue that has been shown to leak data between different user address spaces thanks to corruption of entries in the TLB. In hindsight, we should have spotted this in review when the PAN code was merged back in v4.3, but hindsight is 20/20 and I'm trying not to beat myself up too much about it despite being fairly miserable.

  Anyway, the fix is "obvious" but the actual failure is more subtle, and is described in the commit message. I've included a fairly mechanical follow-up patch here as well, which moves this checking out into the C wrappers, which is what we do for {get,put}_user() already, and allows us to remove these bloody assembly macros entirely. The patches have passed kernelci [1] [2] [3] and CKI [4] tests overnight, as well as some targeted testing [5] for this particular issue.

  The first patch is tagged for stable and should be applied to 4.14, 4.19 and 5.3. I have separate backports for 4.4 and 4.9, which I'll send out once this has landed in your tree (although the original patch applies cleanly, it won't build for those two trees).

  Thanks to Pavel Tatashin for reporting this and Mark Rutland for helping to diagnose the issue and review/test the solution"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: uaccess: Remove uaccess_*_not_uao asm macros
  arm64: uaccess: Ensure PAN is re-enabled after unhandled uaccess fault
commit 81429eb8d9
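The second patch's key idea is the one Will describes above: hoist the PAN/UAO toggling out of the assembly routines and into the C wrappers, so that every exit path, including a fault fixup that aborts the copy partway, leaves PAN balanced. Below is a minimal, self-contained userspace model of that pattern, not kernel code: the pan_enabled flag, the simulated mid-copy fault and main() are invented for illustration, and only the function/macro names are borrowed from the diff that follows. It uses a GNU C statement expression, as the kernel does.

	/*
	 * Illustrative model only; assumes GCC/Clang (statement expressions).
	 * Build: gcc -Wall pan_model.c && ./a.out
	 */
	#include <stdio.h>
	#include <string.h>

	static int pan_enabled = 1;		/* stands in for PSTATE.PAN */

	static void uaccess_enable_not_uao(void)  { pan_enabled = 0; }
	static void uaccess_disable_not_uao(void) { pan_enabled = 1; }

	/* Models the bare asm routine: returns the number of bytes NOT copied. */
	static unsigned long __arch_copy_from_user(void *to, const void *from,
						   unsigned long n)
	{
		unsigned long uncopied = n / 2;	/* pretend we fault midway */

		memcpy(to, from, n - uncopied);
		return uncopied;
	}

	/* C wrapper: the toggling brackets the call, so it is always balanced. */
	#define raw_copy_from_user(to, from, n)				\
	({								\
		unsigned long __acfu_ret;				\
		uaccess_enable_not_uao();				\
		__acfu_ret = __arch_copy_from_user((to), (from), (n));	\
		uaccess_disable_not_uao();				\
		__acfu_ret;						\
	})

	int main(void)
	{
		char src[16] = "hello", dst[16];
		unsigned long left = raw_copy_from_user(dst, src, sizeof(src));

		/* Even though the copy "faulted", PAN is re-enabled here. */
		printf("uncopied=%lu pan_enabled=%d\n", left, pan_enabled);
		return 0;
	}

With the old structure, the asm routine itself flipped PAN and a fixup path could return without restoring it; with the wrapper shape above there is simply no path around the disable call.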
arch/arm64/include/asm/asm-uaccess.h
@@ -58,23 +58,6 @@ alternative_else_nop_endif
 	.endm
 #endif
 
-/*
- * These macros are no-ops when UAO is present.
- */
-	.macro	uaccess_disable_not_uao, tmp1, tmp2
-	uaccess_ttbr0_disable \tmp1, \tmp2
-alternative_if ARM64_ALT_PAN_NOT_UAO
-	SET_PSTATE_PAN(1)
-alternative_else_nop_endif
-	.endm
-
-	.macro	uaccess_enable_not_uao, tmp1, tmp2, tmp3
-	uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
-alternative_if ARM64_ALT_PAN_NOT_UAO
-	SET_PSTATE_PAN(0)
-alternative_else_nop_endif
-	.endm
-
 /*
  * Remove the address tag from a virtual address, if present.
  */
arch/arm64/include/asm/uaccess.h
@@ -378,20 +378,34 @@ do { \
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 #define raw_copy_from_user(to, from, n)					\
 ({									\
-	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
+	unsigned long __acfu_ret;					\
+	uaccess_enable_not_uao();					\
+	__acfu_ret = __arch_copy_from_user((to),			\
+				      __uaccess_mask_ptr(from), (n));	\
+	uaccess_disable_not_uao();					\
+	__acfu_ret;							\
 })
 
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 #define raw_copy_to_user(to, from, n)					\
 ({									\
-	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
+	unsigned long __actu_ret;					\
+	uaccess_enable_not_uao();					\
+	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
+				    (from), (n));			\
+	uaccess_disable_not_uao();					\
+	__actu_ret;							\
 })
 
 extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 #define raw_copy_in_user(to, from, n)					\
 ({									\
-	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
-			    __uaccess_mask_ptr(from), (n));		\
+	unsigned long __aciu_ret;					\
+	uaccess_enable_not_uao();					\
+	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
+				    __uaccess_mask_ptr(from), (n));	\
+	uaccess_disable_not_uao();					\
+	__aciu_ret;							\
 })
 
 #define INLINE_COPY_TO_USER
@@ -400,8 +414,11 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
-	if (access_ok(to, n))
+	if (access_ok(to, n)) {
+		uaccess_enable_not_uao();
 		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
+		uaccess_disable_not_uao();
+	}
 	return n;
 }
 #define clear_user	__clear_user
arch/arm64/lib/clear_user.S
@@ -20,7 +20,6 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__arch_clear_user)
-	uaccess_enable_not_uao x2, x3, x4
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -40,7 +39,6 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
 	b.mi	5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:	mov	x0, #0
-	uaccess_disable_not_uao x2, x3
 	ret
 ENDPROC(__arch_clear_user)
 EXPORT_SYMBOL(__arch_clear_user)
arch/arm64/lib/copy_from_user.S
@@ -54,10 +54,8 @@
 
 end	.req	x5
 ENTRY(__arch_copy_from_user)
-	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-	uaccess_disable_not_uao x3, x4
 	mov	x0, #0				// Nothing to copy
 	ret
 ENDPROC(__arch_copy_from_user)
arch/arm64/lib/copy_in_user.S
@@ -56,10 +56,8 @@
 end	.req	x5
 
 ENTRY(__arch_copy_in_user)
-	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
 ENDPROC(__arch_copy_in_user)
arch/arm64/lib/copy_to_user.S
@@ -53,10 +53,8 @@
 
 end	.req	x5
 ENTRY(__arch_copy_to_user)
-	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
 ENDPROC(__arch_copy_to_user)
arch/arm64/lib/uaccess_flushcache.c
@@ -28,7 +28,11 @@ void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
 unsigned long __copy_user_flushcache(void *to, const void __user *from,
 				     unsigned long n)
 {
-	unsigned long rc = __arch_copy_from_user(to, from, n);
+	unsigned long rc;
+
+	uaccess_enable_not_uao();
+	rc = __arch_copy_from_user(to, from, n);
+	uaccess_disable_not_uao();
 
 	/* See above */
 	__clean_dcache_area_pop(to, n - rc);