linux/include/crypto/chacha.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common values and helper functions for the ChaCha and XChaCha stream ciphers.
*
* XChaCha extends ChaCha's nonce to 192 bits, while provably retaining ChaCha's
* security. Here they share the same key size, tfm context, and setkey
* function; only their IV size and encrypt/decrypt function differ.
*
* The ChaCha paper specifies 20, 12, and 8-round variants. In general, it is
* recommended to use the 20-round variant ChaCha20. However, the other
* variants can be needed in some performance-sensitive scenarios. The generic
* ChaCha code currently allows only the 20 and 12-round variants.
*/
#ifndef _CRYPTO_CHACHA_H
#define _CRYPTO_CHACHA_H

#include <asm/unaligned.h>
#include <linux/types.h>

/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
#define CHACHA_IV_SIZE 16
#define CHACHA_KEY_SIZE 32
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))

/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
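
/*
 * Illustrative sketch, not part of this header: one way a caller might pack
 * a 32-bit block counter and a 96-bit RFC7539 nonce into the 16-byte ChaCha
 * IV described above.  "counter" and "nonce" are hypothetical variables.
 *
 *	u8 iv[CHACHA_IV_SIZE];
 *
 *	put_unaligned_le32(counter, iv);	// bytes 0..3: stream position
 *	memcpy(iv + 4, nonce, 12);		// bytes 4..15: 96-bit nonce
 */
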
void chacha_block_generic(u32 *state, u8 *stream, int nrounds);
static inline void chacha20_block(u32 *state, u8 *stream)
{
chacha_block_generic(state, stream, 20);
}
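
/*
 * Illustrative sketch, not from this header: producing raw keystream with
 * chacha20_block().  "key_words" (8 x u32) and "iv" (16 bytes) are
 * hypothetical; the state is first set up with chacha_init(), declared
 * further below.  In the generic implementation each call emits one 64-byte
 * block and advances the 32-bit block counter in state[12], so repeated
 * calls yield successive keystream blocks.
 *
 *	u32 state[CHACHA_STATE_WORDS];
 *	u8 block[CHACHA_BLOCK_SIZE];
 *
 *	chacha_init(state, key_words, iv);
 *	chacha20_block(state, block);
 */
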
void hchacha_block_arch(const u32 *state, u32 *out, int nrounds);
void hchacha_block_generic(const u32 *state, u32 *out, int nrounds);
static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
hchacha_block_arch(state, out, nrounds);
else
hchacha_block_generic(state, out, nrounds);
}
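
/*
 * Illustrative sketch, not a declaration from this header: HChaCha is the
 * building block XChaCha typically uses to derive its subkey.  The state is
 * loaded with the constants, the 256-bit key, and the first 128 bits of the
 * 192-bit XChaCha nonce; hchacha_block() then outputs a 256-bit subkey
 * (8 words).  The remaining 64 nonce bits and the stream position form the
 * IV of the inner ChaCha.  "key_words" and "xchacha_iv" are hypothetical.
 *
 *	u32 state[CHACHA_STATE_WORDS];
 *	u32 subkey[CHACHA_KEY_SIZE / sizeof(u32)];
 *
 *	chacha_init(state, key_words, xchacha_iv);	// uses IV bytes 0..15
 *	hchacha_block(state, subkey, 20);		// or 12 for XChaCha12
 */
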
static inline void chacha_init_consts(u32 *state)
{
state[0] = 0x61707865; /* "expa" */
state[1] = 0x3320646e; /* "nd 3" */
state[2] = 0x79622d32; /* "2-by" */
state[3] = 0x6b206574; /* "te k" */
}
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
{
chacha_init_consts(state);
state[4] = key[0];
state[5] = key[1];
state[6] = key[2];
state[7] = key[3];
state[8] = key[4];
state[9] = key[5];
state[10] = key[6];
state[11] = key[7];
state[12] = get_unaligned_le32(iv + 0);
state[13] = get_unaligned_le32(iv + 4);
state[14] = get_unaligned_le32(iv + 8);
state[15] = get_unaligned_le32(iv + 12);
}
static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
chacha_init_arch(state, key, iv);
else
chacha_init_generic(state, key, iv);
}
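
/*
 * After the init helpers above, the 16-word state is laid out as: words 0..3
 * hold the "expand 32-byte k" constants, words 4..11 hold the eight key
 * words, and words 12..15 hold the IV loaded as little-endian 32-bit words
 * (for plain ChaCha: block counter, then 96-bit nonce).
 */
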
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
chacha_crypt_arch(state, dst, src, bytes, nrounds);
else
chacha_crypt_generic(state, dst, src, bytes, nrounds);
}
static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes)
{
chacha_crypt(state, dst, src, bytes, 20);
}
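
/*
 * Illustrative sketch, not from this header: encrypting a buffer with the
 * library interface above.  "key_words", "iv", "src", "dst", and "len" are
 * hypothetical; since ChaCha is a stream cipher, the same call decrypts.
 * The state carries the advancing block counter, so call chacha_init()
 * again before processing a new message/nonce.  Note that keystream is
 * consumed in whole blocks, so when splitting one message across several
 * chacha_crypt() calls, all but the final chunk should be a multiple of
 * CHACHA_BLOCK_SIZE.
 *
 *	u32 state[CHACHA_STATE_WORDS];
 *
 *	chacha_init(state, key_words, iv);
 *	chacha20_crypt(state, dst, src, len);
 */
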
#endif /* _CRYPTO_CHACHA_H */