/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

/* included by aes-ce.S and aes-neon.S */

        .text
        .align  4

#ifndef MAX_STRIDE
#define MAX_STRIDE      4
#endif

#if MAX_STRIDE == 4
#define ST4(x...) x
#define ST5(x...)
#else
#define ST4(x...)
#define ST5(x...) x
#endif
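
/*
 * MAX_STRIDE is the interleave factor: the Crypto Extensions flavour of this
 * code (aes-ce.S) builds it with MAX_STRIDE == 5 so that ECB, CBC decryption
 * and CTR process five blocks per iteration, hiding the NEON issue latency of
 * deeply pipelined cores such as Marvell ThunderX2 (~11% speedup per the
 * commit that introduced it).  The plain NEON flavour keeps the default of 4.
 * ST4()/ST5() emit their argument only for the matching stride, so both
 * variants share the code below.
 */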

SYM_FUNC_START_LOCAL(aes_encrypt_block4x)
        encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
        ret
SYM_FUNC_END(aes_encrypt_block4x)

SYM_FUNC_START_LOCAL(aes_decrypt_block4x)
        decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
        ret
SYM_FUNC_END(aes_decrypt_block4x)

#if MAX_STRIDE == 5
SYM_FUNC_START_LOCAL(aes_encrypt_block5x)
        encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
        ret
SYM_FUNC_END(aes_encrypt_block5x)

SYM_FUNC_START_LOCAL(aes_decrypt_block5x)
        decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
        ret
SYM_FUNC_END(aes_decrypt_block5x)
#endif

        /*
         * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
         *                 int blocks)
         * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
         *                 int blocks)
         */
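
        /*
         * Illustrative sketch (not the actual glue code) of how the C side is
         * expected to drive these routines: the commit that moved kernel-mode
         * NEON handling into the walk loop keeps NEON enabled only around the
         * call itself, roughly like
         *
         *      while (walk.nbytes >= AES_BLOCK_SIZE) {
         *              unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
         *
         *              kernel_neon_begin();
         *              aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
         *                              ctx->key_enc, rounds, blocks);
         *              kernel_neon_end();
         *              err = skcipher_walk_done(&walk,
         *                                       walk.nbytes % AES_BLOCK_SIZE);
         *      }
         *
         * so no NEON state is live across the preemptible parts of the walk.
         */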

AES_FUNC_START(aes_ecb_encrypt)
        stp     x29, x30, [sp, #-16]!
        mov     x29, sp

        enc_prepare     w3, x2, x5

.LecbencloopNx:
        subs    w4, w4, #MAX_STRIDE
        bmi     .Lecbenc1x
        ld1     {v0.16b-v3.16b}, [x1], #64     /* get 4 pt blocks */
ST4(    bl      aes_encrypt_block4x     )
ST5(    ld1     {v4.16b}, [x1], #16     )
ST5(    bl      aes_encrypt_block5x     )
        st1     {v0.16b-v3.16b}, [x0], #64
ST5(    st1     {v4.16b}, [x0], #16     )
        b       .LecbencloopNx
.Lecbenc1x:
        adds    w4, w4, #MAX_STRIDE
        beq     .Lecbencout
.Lecbencloop:
        ld1     {v0.16b}, [x1], #16            /* get next pt block */
        encrypt_block   v0, w3, x2, x5, w6
        st1     {v0.16b}, [x0], #16
        subs    w4, w4, #1
        bne     .Lecbencloop
.Lecbencout:
        ldp     x29, x30, [sp], #16
        ret
AES_FUNC_END(aes_ecb_encrypt)

AES_FUNC_START(aes_ecb_decrypt)
        stp     x29, x30, [sp, #-16]!
        mov     x29, sp

        dec_prepare     w3, x2, x5

.LecbdecloopNx:
        subs    w4, w4, #MAX_STRIDE
        bmi     .Lecbdec1x
        ld1     {v0.16b-v3.16b}, [x1], #64     /* get 4 ct blocks */
ST4(    bl      aes_decrypt_block4x     )
ST5(    ld1     {v4.16b}, [x1], #16     )
ST5(    bl      aes_decrypt_block5x     )
        st1     {v0.16b-v3.16b}, [x0], #64
ST5(    st1     {v4.16b}, [x0], #16     )
        b       .LecbdecloopNx
.Lecbdec1x:
        adds    w4, w4, #MAX_STRIDE
        beq     .Lecbdecout
.Lecbdecloop:
        ld1     {v0.16b}, [x1], #16            /* get next ct block */
        decrypt_block   v0, w3, x2, x5, w6
        st1     {v0.16b}, [x0], #16
        subs    w4, w4, #1
        bne     .Lecbdecloop
.Lecbdecout:
        ldp     x29, x30, [sp], #16
        ret
AES_FUNC_END(aes_ecb_decrypt)

        /*
         * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
         *                 int blocks, u8 iv[])
         * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
         *                 int blocks, u8 iv[])
         * aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[],
         *                       int rounds, int blocks, u8 iv[],
         *                       u32 const rk2[]);
         * aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
         *                       int rounds, int blocks, u8 iv[],
         *                       u32 const rk2[]);
         */
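
        /*
         * The ESSIV variants first encrypt the incoming iv[] with the second
         * key schedule rk2[] (always an AES-256 schedule, hence the
         * hard-coded 14 rounds below) to derive the per-sector IV, then fall
         * through to the ordinary CBC code using rk1[].
         */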

AES_FUNC_START(aes_essiv_cbc_encrypt)
        ld1     {v4.16b}, [x5]                 /* get iv */

        mov     w8, #14                        /* AES-256: 14 rounds */
        enc_prepare     w8, x6, x7
        encrypt_block   v4, w8, x6, x7, w9
        enc_switch_key  w3, x2, x6
        b       .Lcbcencloop4x

AES_FUNC_START(aes_cbc_encrypt)
        ld1     {v4.16b}, [x5]                 /* get iv */
        enc_prepare     w3, x2, x6
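
        /*
         * CBC encryption is inherently serial: each ciphertext block feeds
         * the XOR for the next plaintext block, so the "4x" loop below still
         * encrypts one block at a time and only amortises the loads and
         * stores over four blocks.
         */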

.Lcbcencloop4x:
        subs    w4, w4, #4
        bmi     .Lcbcenc1x
        ld1     {v0.16b-v3.16b}, [x1], #64     /* get 4 pt blocks */
        eor     v0.16b, v0.16b, v4.16b         /* ..and xor with iv */
        encrypt_block   v0, w3, x2, x6, w7
        eor     v1.16b, v1.16b, v0.16b
        encrypt_block   v1, w3, x2, x6, w7
        eor     v2.16b, v2.16b, v1.16b
        encrypt_block   v2, w3, x2, x6, w7
        eor     v3.16b, v3.16b, v2.16b
        encrypt_block   v3, w3, x2, x6, w7
        st1     {v0.16b-v3.16b}, [x0], #64
        mov     v4.16b, v3.16b
        b       .Lcbcencloop4x
.Lcbcenc1x:
        adds    w4, w4, #4
        beq     .Lcbcencout
.Lcbcencloop:
        ld1     {v0.16b}, [x1], #16            /* get next pt block */
        eor     v4.16b, v4.16b, v0.16b         /* ..and xor with iv */
        encrypt_block   v4, w3, x2, x6, w7
        st1     {v4.16b}, [x0], #16
        subs    w4, w4, #1
        bne     .Lcbcencloop
.Lcbcencout:
        st1     {v4.16b}, [x5]                 /* return iv */
        ret
AES_FUNC_END(aes_cbc_encrypt)
AES_FUNC_END(aes_essiv_cbc_encrypt)

AES_FUNC_START(aes_essiv_cbc_decrypt)
        stp     x29, x30, [sp, #-16]!
        mov     x29, sp

        ld1     {cbciv.16b}, [x5]              /* get iv */

        mov     w8, #14                        /* AES-256: 14 rounds */
        enc_prepare     w8, x6, x7
        encrypt_block   cbciv, w8, x6, x7, w9
        b       .Lessivcbcdecstart

AES_FUNC_START(aes_cbc_decrypt)
        stp     x29, x30, [sp, #-16]!
        mov     x29, sp

        ld1     {cbciv.16b}, [x5]              /* get iv */
.Lessivcbcdecstart:
        dec_prepare     w3, x2, x6

.LcbcdecloopNx:
        subs    w4, w4, #MAX_STRIDE
        bmi     .Lcbcdec1x
        ld1     {v0.16b-v3.16b}, [x1], #64     /* get 4 ct blocks */
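        /*
         * CBC decryption needs the original ciphertext blocks for the
         * chaining XOR after the blocks have been decrypted in place.  The
         * 5-way path below keeps them in v5-v7 and cbciv rather than
         * spilling to the stack, at the cost of a single extra reload of one
         * ct block from memory; the 4-way path gets by with v4-v6.
         */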
#if MAX_STRIDE == 5
        ld1     {v4.16b}, [x1], #16            /* get 1 ct block */
        mov     v5.16b, v0.16b
        mov     v6.16b, v1.16b
        mov     v7.16b, v2.16b
        bl      aes_decrypt_block5x
        sub     x1, x1, #32
        eor     v0.16b, v0.16b, cbciv.16b
        eor     v1.16b, v1.16b, v5.16b
        ld1     {v5.16b}, [x1], #16            /* reload 1 ct block */
        ld1     {cbciv.16b}, [x1], #16         /* reload 1 ct block */
        eor     v2.16b, v2.16b, v6.16b
        eor     v3.16b, v3.16b, v7.16b
        eor     v4.16b, v4.16b, v5.16b
#else
        mov     v4.16b, v0.16b
        mov     v5.16b, v1.16b
        mov     v6.16b, v2.16b
        bl      aes_decrypt_block4x
        sub     x1, x1, #16
        eor     v0.16b, v0.16b, cbciv.16b
        eor     v1.16b, v1.16b, v4.16b
        ld1     {cbciv.16b}, [x1], #16         /* reload 1 ct block */
        eor     v2.16b, v2.16b, v5.16b
        eor     v3.16b, v3.16b, v6.16b
#endif
        st1     {v0.16b-v3.16b}, [x0], #64
ST5(    st1     {v4.16b}, [x0], #16     )
        b       .LcbcdecloopNx
.Lcbcdec1x:
        adds    w4, w4, #MAX_STRIDE
        beq     .Lcbcdecout
.Lcbcdecloop:
        ld1     {v1.16b}, [x1], #16            /* get next ct block */
        mov     v0.16b, v1.16b                 /* ...and copy to v0 */
        decrypt_block   v0, w3, x2, x6, w7
        eor     v0.16b, v0.16b, cbciv.16b      /* xor with iv => pt */
        mov     cbciv.16b, v1.16b              /* ct is next iv */
        st1     {v0.16b}, [x0], #16
        subs    w4, w4, #1
        bne     .Lcbcdecloop
.Lcbcdecout:
        st1     {cbciv.16b}, [x5]              /* return iv */
        ldp     x29, x30, [sp], #16
        ret
AES_FUNC_END(aes_cbc_decrypt)
AES_FUNC_END(aes_essiv_cbc_decrypt)

        /*
         * aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
         *                     int rounds, int bytes, u8 const iv[])
         * aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
         *                     int rounds, int bytes, u8 const iv[])
         */
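
        /*
         * These routines handle the ciphertext-stealing step for CBC-CTS:
         * they are handed the final two (possibly partial) blocks of the
         * message, load them with a pair of overlapping loads, and use the
         * permute vectors from .Lcts_permute_table to swap and truncate the
         * last pair of blocks before writing them back with overlapping
         * stores.
         */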

AES_FUNC_START(aes_cbc_cts_encrypt)
        adr_l   x8, .Lcts_permute_table
        sub     x4, x4, #16
        add     x9, x8, #32
        add     x8, x8, x4
        sub     x9, x9, x4
        ld1     {v3.16b}, [x8]
        ld1     {v4.16b}, [x9]

        ld1     {v0.16b}, [x1], x4             /* overlapping loads */
        ld1     {v1.16b}, [x1]

        ld1     {v5.16b}, [x5]                 /* get iv */
        enc_prepare     w3, x2, x6

        eor     v0.16b, v0.16b, v5.16b         /* xor with iv */
        tbl     v1.16b, {v1.16b}, v4.16b
        encrypt_block   v0, w3, x2, x6, w7

        eor     v1.16b, v1.16b, v0.16b
        tbl     v0.16b, {v0.16b}, v3.16b
        encrypt_block   v1, w3, x2, x6, w7

        add     x4, x0, x4
        st1     {v0.16b}, [x4]                 /* overlapping stores */
        st1     {v1.16b}, [x0]
        ret
AES_FUNC_END(aes_cbc_cts_encrypt)

AES_FUNC_START(aes_cbc_cts_decrypt)
        adr_l   x8, .Lcts_permute_table
        sub     x4, x4, #16
        add     x9, x8, #32
        add     x8, x8, x4
        sub     x9, x9, x4
        ld1     {v3.16b}, [x8]
        ld1     {v4.16b}, [x9]

        ld1     {v0.16b}, [x1], x4             /* overlapping loads */
        ld1     {v1.16b}, [x1]

        ld1     {v5.16b}, [x5]                 /* get iv */
        dec_prepare     w3, x2, x6

        decrypt_block   v0, w3, x2, x6, w7
        tbl     v2.16b, {v0.16b}, v3.16b
        eor     v2.16b, v2.16b, v1.16b

        tbx     v0.16b, {v1.16b}, v4.16b
        decrypt_block   v0, w3, x2, x6, w7
        eor     v0.16b, v0.16b, v5.16b         /* xor with iv */

        add     x4, x0, x4
        st1     {v2.16b}, [x4]                 /* overlapping stores */
        st1     {v0.16b}, [x0]
        ret
AES_FUNC_END(aes_cbc_cts_decrypt)
|
2018-09-10 14:41:14 +00:00
|
|
|
|
|
|
|
.section ".rodata", "a"
|
|
|
|
.align 6
|
|
|
|
.Lcts_permute_table:
|
|
|
|
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
|
|
|
|
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
|
|
|
|
.byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
|
|
|
|
.byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
|
|
|
|
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
|
|
|
|
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
|
|
|
|
.previous
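
        /*
         * The permute table is sixteen 0xff bytes, the identity permutation
         * 0x0..0xf, and another sixteen 0xff bytes.  Loading 16 bytes from
         * some offset into it yields a tbl/tbx control vector that shifts a
         * block by that many bytes: 0xff entries make tbl produce zero bytes
         * and make tbx leave the destination bytes untouched, which is what
         * the CTS code above and the CTR tail code below rely on.
         */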

        /*
         * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
         *                 int bytes, u8 ctr[], u8 finalbuf[])
         */
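
        /*
         * Unlike the ECB/CBC helpers above, the CTR routine takes a byte
         * count: any remainder of a full block is handled in .Lctrtail, and
         * when less than one full block is left the keystream-XORed result
         * is written to finalbuf[] instead of out[] so the caller can copy
         * out just the bytes it needs.  ctr[] holds the big-endian 128-bit
         * counter and is updated with the next counter value on return.
         */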

AES_FUNC_START(aes_ctr_encrypt)
        stp     x29, x30, [sp, #-16]!
        mov     x29, sp

        enc_prepare     w3, x2, x12
        ld1     {vctr.16b}, [x5]

        umov    x12, vctr.d[1]                 /* keep swabbed ctr in reg */
        rev     x12, x12

.LctrloopNx:
        add     w7, w4, #15
        sub     w4, w4, #MAX_STRIDE << 4
        lsr     w7, w7, #4
        mov     w8, #MAX_STRIDE
        cmp     w7, w8
        csel    w7, w7, w8, lt
        adds    x12, x12, x7

        mov     v0.16b, vctr.16b
        mov     v1.16b, vctr.16b
        mov     v2.16b, vctr.16b
        mov     v3.16b, vctr.16b
ST5(    mov     v4.16b, vctr.16b        )
        bcs     0f

        .subsection     1
        /* apply carry to outgoing counter */
0:      umov    x8, vctr.d[0]
        rev     x8, x8
        add     x8, x8, #1
        rev     x8, x8
        ins     vctr.d[0], x8

        /* apply carry to N counter blocks for N := x12 */
        cbz     x12, 2f
        adr     x16, 1f
        sub     x16, x16, x12, lsl #3
        br      x16
        hint    34                             // bti c
        mov     v0.d[0], vctr.d[0]
        hint    34                             // bti c
        mov     v1.d[0], vctr.d[0]
        hint    34                             // bti c
        mov     v2.d[0], vctr.d[0]
        hint    34                             // bti c
        mov     v3.d[0], vctr.d[0]
ST5(    hint    34                      )
ST5(    mov     v4.d[0], vctr.d[0]      )
1:      b       2f
        .previous

2:      rev     x7, x12
        ins     vctr.d[1], x7
        sub     x7, x12, #MAX_STRIDE - 1
        sub     x8, x12, #MAX_STRIDE - 2
        sub     x9, x12, #MAX_STRIDE - 3
        rev     x7, x7
        rev     x8, x8
        mov     v1.d[1], x7
        rev     x9, x9
ST5(    sub     x10, x12, #MAX_STRIDE - 4       )
        mov     v2.d[1], x8
ST5(    rev     x10, x10                )
        mov     v3.d[1], x9
ST5(    mov     v4.d[1], x10            )
        tbnz    w4, #31, .Lctrtail
        ld1     {v5.16b-v7.16b}, [x1], #48
ST4(    bl      aes_encrypt_block4x     )
ST5(    bl      aes_encrypt_block5x     )
        eor     v0.16b, v5.16b, v0.16b
ST4(    ld1     {v5.16b}, [x1], #16     )
        eor     v1.16b, v6.16b, v1.16b
ST5(    ld1     {v5.16b-v6.16b}, [x1], #32      )
        eor     v2.16b, v7.16b, v2.16b
        eor     v3.16b, v5.16b, v3.16b
ST5(    eor     v4.16b, v6.16b, v4.16b  )
        st1     {v0.16b-v3.16b}, [x0], #64
ST5(    st1     {v4.16b}, [x0], #16     )
        cbz     w4, .Lctrout
        b       .LctrloopNx

.Lctrout:
        st1     {vctr.16b}, [x5]               /* return next CTR value */
        ldp     x29, x30, [sp], #16
        ret

.Lctrtail:
        /* XOR up to MAX_STRIDE * 16 - 1 bytes of in/output with v0 ... v3/v4 */
        mov     x16, #16
        ands    x13, x4, #0xf
        csel    x13, x13, x16, ne

ST5(    cmp     w4, #64 - (MAX_STRIDE << 4)     )
ST5(    csel    x14, x16, xzr, gt       )
        cmp     w4, #48 - (MAX_STRIDE << 4)
        csel    x15, x16, xzr, gt
        cmp     w4, #32 - (MAX_STRIDE << 4)
        csel    x16, x16, xzr, gt
        cmp     w4, #16 - (MAX_STRIDE << 4)
        ble     .Lctrtail1x

        adr_l   x12, .Lcts_permute_table
        add     x12, x12, x13

ST5(    ld1     {v5.16b}, [x1], x14     )
        ld1     {v6.16b}, [x1], x15
        ld1     {v7.16b}, [x1], x16

ST4(    bl      aes_encrypt_block4x     )
ST5(    bl      aes_encrypt_block5x     )

        ld1     {v8.16b}, [x1], x13
        ld1     {v9.16b}, [x1]
        ld1     {v10.16b}, [x12]

ST4(    eor     v6.16b, v6.16b, v0.16b  )
ST4(    eor     v7.16b, v7.16b, v1.16b  )
ST4(    tbl     v3.16b, {v3.16b}, v10.16b       )
ST4(    eor     v8.16b, v8.16b, v2.16b  )
ST4(    eor     v9.16b, v9.16b, v3.16b  )

ST5(    eor     v5.16b, v5.16b, v0.16b  )
ST5(    eor     v6.16b, v6.16b, v1.16b  )
ST5(    tbl     v4.16b, {v4.16b}, v10.16b       )
ST5(    eor     v7.16b, v7.16b, v2.16b  )
ST5(    eor     v8.16b, v8.16b, v3.16b  )
ST5(    eor     v9.16b, v9.16b, v4.16b  )

ST5(    st1     {v5.16b}, [x0], x14     )
        st1     {v6.16b}, [x0], x15
        st1     {v7.16b}, [x0], x16
        add     x13, x13, x0
        st1     {v9.16b}, [x13]                // overlapping stores
        st1     {v8.16b}, [x0]
        b       .Lctrout

.Lctrtail1x:
        csel    x0, x0, x6, eq                 // use finalbuf if less than a full block
        ld1     {v5.16b}, [x1]
ST5(    mov     v3.16b, v4.16b          )
        encrypt_block   v3, w3, x2, x8, w7
        eor     v5.16b, v5.16b, v3.16b
        st1     {v5.16b}, [x0]
        b       .Lctrout
AES_FUNC_END(aes_ctr_encrypt)

        /*
         * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
         *                 int bytes, u8 const rk2[], u8 iv[], int first)
         * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
         *                 int bytes, u8 const rk2[], u8 iv[], int first)
         */

        .macro          next_tweak, out, in, tmp
        sshr    \tmp\().2d,  \in\().2d,   #63
        and     \tmp\().16b, \tmp\().16b, xtsmask.16b
        add     \out\().2d,  \in\().2d,   \in\().2d
        ext     \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
        eor     \out\().16b, \out\().16b, \tmp\().16b
        .endm

        .macro          xts_load_mask, tmp
        movi    xtsmask.2s, #0x1
        movi    \tmp\().2s, #0x87
        uzp1    xtsmask.4s, xtsmask.4s, \tmp\().4s
        .endm
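
        /*
         * next_tweak multiplies the tweak by x in GF(2^128): each 64-bit
         * lane is doubled, and xtsmask (which xts_load_mask sets to
         * {0x1, 0x87}) feeds the carry out of the low lane into bit 0 of the
         * high lane and folds the carry out of the high lane back into the
         * low byte as the reduction polynomial 0x87.
         */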
|
2014-03-21 09:19:17 +00:00
|
|
|
|
2020-02-18 19:58:26 +00:00
|
|
|
AES_FUNC_START(aes_xts_encrypt)
|
2018-09-10 14:41:13 +00:00
|
|
|
stp x29, x30, [sp, #-16]!
|
|
|
|
mov x29, sp
|
2018-03-10 15:21:51 +00:00
|
|
|
|
2018-09-10 14:41:13 +00:00
|
|
|
ld1 {v4.16b}, [x6]
|
2018-10-08 11:16:59 +00:00
|
|
|
xts_load_mask v8
|
cbz w7, .Lxtsencnotfirst

	enc_prepare	w3, x5, x8
	xts_cts_skip_tw	w7, .LxtsencNx
encrypt_block v4, w3, x5, x8, w7 /* first tweak */
	enc_switch_key	w3, x2, x8
	b		.LxtsencNx

.Lxtsencnotfirst:
	enc_prepare	w3, x2, x8
.LxtsencloopNx:
	next_tweak	v4, v4, v8
.LxtsencNx:
	subs		w4, w4, #64
	bmi		.Lxtsenc1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 pt blocks */
	next_tweak	v5, v4, v8
	eor		v0.16b, v0.16b, v4.16b
	next_tweak	v6, v5, v8
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	next_tweak	v7, v6, v8
	eor		v3.16b, v3.16b, v7.16b
	bl		aes_encrypt_block4x
	eor		v3.16b, v3.16b, v7.16b
	eor		v0.16b, v0.16b, v4.16b
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	st1		{v0.16b-v3.16b}, [x0], #64
	mov		v4.16b, v7.16b
	cbz		w4, .Lxtsencret
	xts_reload_mask	v8
	b		.LxtsencloopNx

.Lxtsenc1x:
	adds		w4, w4, #64
	beq		.Lxtsencout
	subs		w4, w4, #16
	bmi		.LxtsencctsNx
.Lxtsencloop:
	ld1		{v0.16b}, [x1], #16
.Lxtsencctsout:
	eor		v0.16b, v0.16b, v4.16b
	encrypt_block	v0, w3, x2, x8, w7
	eor		v0.16b, v0.16b, v4.16b
	cbz		w4, .Lxtsencout
	subs		w4, w4, #16
	next_tweak	v4, v4, v8
	bmi		.Lxtsenccts
	st1		{v0.16b}, [x0], #16
	b		.Lxtsencloop
.Lxtsencout:
	st1		{v0.16b}, [x0]
.Lxtsencret:
	st1		{v4.16b}, [x6]
	ldp		x29, x30, [sp], #16
	ret

.LxtsencctsNx:
	mov		v0.16b, v3.16b
	sub		x0, x0, #16
.Lxtsenccts:
	adr_l		x8, .Lcts_permute_table

	add		x1, x1, w4, sxtw	/* rewind input pointer */
	add		w4, w4, #16		/* # bytes in final block */
	add		x9, x8, #32
	add		x8, x8, x4
	sub		x9, x9, x4
	add		x4, x0, x4		/* output address of final block */

	ld1		{v1.16b}, [x1]		/* load final block */
	ld1		{v2.16b}, [x8]
	ld1		{v3.16b}, [x9]

	tbl		v2.16b, {v0.16b}, v2.16b
	tbx		v0.16b, {v1.16b}, v3.16b
	st1		{v2.16b}, [x4]			/* overlapping stores */
	mov		w4, wzr
	b		.Lxtsencctsout
AES_FUNC_END(aes_xts_encrypt)
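
/*
 * The .Lxtsenccts path above handles messages whose length is not a multiple
 * of 16 by ciphertext stealing.  Roughly, in C (cc is the ciphertext of the
 * last full block, still held in v0; tail = len % 16; helper names are
 * illustrative only):
 *
 *	memcpy(out + 16, cc, tail);		// C[n] is cc truncated to the tail
 *	memcpy(block, in, tail);		// pad the short plaintext tail ...
 *	memcpy(block + tail, cc + tail, 16 - tail);	// ... with cc's stolen bytes
 *	// back on .Lxtsencctsout: C[n-1] = E_K1(block ^ T') ^ T' with the next tweak
 *
 * The tbl/tbx against .Lcts_permute_table performs the byte moves in a single
 * shuffle, and the overlapping stores write both blocks back.
 */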

AES_FUNC_START(aes_xts_decrypt)
	stp		x29, x30, [sp, #-16]!
	mov		x29, sp

	/* subtract 16 bytes if we are doing CTS */
	sub		w8, w4, #0x10
	tst		w4, #0xf
	csel		w4, w4, w8, eq
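	/*
	 * When the length is not block aligned the bulk loop is made to stop
	 * one block early: with ciphertext stealing the second-to-last
	 * ciphertext block must be decrypted with the *final* tweak, so the
	 * last two blocks are handled together on the .Lxtsdeccts path below.
	 */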

	ld1		{v4.16b}, [x6]
	xts_load_mask	v8
	xts_cts_skip_tw	w7, .Lxtsdecskiptw
cbz w7, .Lxtsdecnotfirst

	enc_prepare	w3, x5, x8
	encrypt_block	v4, w3, x5, x8, w7		/* first tweak */
.Lxtsdecskiptw:
dec_prepare w3, x2, x8
	b		.LxtsdecNx

.Lxtsdecnotfirst:
	dec_prepare	w3, x2, x8
.LxtsdecloopNx:
	next_tweak	v4, v4, v8
.LxtsdecNx:
	subs		w4, w4, #64
	bmi		.Lxtsdec1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
	next_tweak	v5, v4, v8
	eor		v0.16b, v0.16b, v4.16b
	next_tweak	v6, v5, v8
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	next_tweak	v7, v6, v8
	eor		v3.16b, v3.16b, v7.16b
	bl		aes_decrypt_block4x
	eor		v3.16b, v3.16b, v7.16b
	eor		v0.16b, v0.16b, v4.16b
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	st1		{v0.16b-v3.16b}, [x0], #64
	mov		v4.16b, v7.16b
	cbz		w4, .Lxtsdecout
	xts_reload_mask	v8
	b		.LxtsdecloopNx

.Lxtsdec1x:
	adds		w4, w4, #64
	beq		.Lxtsdecout
	subs		w4, w4, #16
.Lxtsdecloop:
	ld1		{v0.16b}, [x1], #16
	bmi		.Lxtsdeccts
.Lxtsdecctsout:
	eor		v0.16b, v0.16b, v4.16b
	decrypt_block	v0, w3, x2, x8, w7
	eor		v0.16b, v0.16b, v4.16b
	st1		{v0.16b}, [x0], #16
	cbz		w4, .Lxtsdecout
	subs		w4, w4, #16
	next_tweak	v4, v4, v8
	b		.Lxtsdecloop
.Lxtsdecout:
	st1		{v4.16b}, [x6]
	ldp		x29, x30, [sp], #16
	ret

.Lxtsdeccts:
	adr_l		x8, .Lcts_permute_table

	add		x1, x1, w4, sxtw	/* rewind input pointer */
	add		w4, w4, #16		/* # bytes in final block */
	add		x9, x8, #32
	add		x8, x8, x4
	sub		x9, x9, x4
	add		x4, x0, x4		/* output address of final block */

	next_tweak	v5, v4, v8

	ld1		{v1.16b}, [x1]		/* load final block */
	ld1		{v2.16b}, [x8]
	ld1		{v3.16b}, [x9]

	eor		v0.16b, v0.16b, v5.16b
	decrypt_block	v0, w3, x2, x8, w7
	eor		v0.16b, v0.16b, v5.16b

	tbl		v2.16b, {v0.16b}, v2.16b
	tbx		v0.16b, {v1.16b}, v3.16b

	st1		{v2.16b}, [x4]			/* overlapping stores */
	mov		w4, wzr
	b		.Lxtsdecctsout
AES_FUNC_END(aes_xts_decrypt)

/*
 * aes_mac_update(u8 const in[], u32 const rk[], int rounds,
 *		  int blocks, u8 dg[], int enc_before, int enc_after)
 */
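
/*
 * This routine performs a CBC-MAC style update: the running digest is XORed
 * with each input block and re-encrypted, with enc_before/enc_after selecting
 * whether the digest gets an extra encryption before the first block and
 * after the last one.  A minimal C sketch of the main loop (names are
 * illustrative only; the asm additionally returns the number of blocks left
 * unprocessed if it has to yield):
 *
 *	void mac_update_c(const u8 *in, const u32 *rk, int rounds,
 *			  int blocks, u8 dg[16])
 *	{
 *		while (blocks--) {
 *			xor_block(dg, in);		// dg ^= next block
 *			aes_encrypt(rk, rounds, dg);	// dg = E_K(dg)
 *			in += 16;
 *		}
 *	}
 */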
AES_FUNC_START(aes_mac_update)
	ld1		{v0.16b}, [x4]			/* get dg */
	enc_prepare	w2, x1, x7
	cbz		w5, .Lmacloop4x

	encrypt_block	v0, w2, x1, x7, w8

.Lmacloop4x:
	subs		w3, w3, #4
	bmi		.Lmac1x
	ld1		{v1.16b-v4.16b}, [x0], #64	/* get next pt block */
	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with dg */
	encrypt_block	v0, w2, x1, x7, w8
	eor		v0.16b, v0.16b, v2.16b
	encrypt_block	v0, w2, x1, x7, w8
	eor		v0.16b, v0.16b, v3.16b
	encrypt_block	v0, w2, x1, x7, w8
	eor		v0.16b, v0.16b, v4.16b
	cmp		w3, wzr
	csinv		x5, x6, xzr, eq
	cbz		w5, .Lmacout
	encrypt_block	v0, w2, x1, x7, w8
	st1		{v0.16b}, [x4]			/* return dg */
	cond_yield	.Lmacout, x7, x8
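	/*
	 * cond_yield drops out to .Lmacout when a reschedule is pending; the
	 * digest was stored just above and w3 still holds the number of
	 * unprocessed blocks, so the caller can resume where the asm left off.
	 */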
	b		.Lmacloop4x
.Lmac1x:
	add		w3, w3, #4
.Lmacloop:
	cbz		w3, .Lmacout
	ld1		{v1.16b}, [x0], #16		/* get next pt block */
	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with dg */

	subs		w3, w3, #1
	csinv		x5, x6, xzr, eq
	cbz		w5, .Lmacout

.Lmacenc:
	encrypt_block	v0, w2, x1, x7, w8
	b		.Lmacloop

.Lmacout:
	st1		{v0.16b}, [x4]			/* return dg */
	mov		w0, w3
	ret
AES_FUNC_END(aes_mac_update)