crypto: x86 - add more optimized XTS-mode for serpent-avx
This patch adds AVX optimized XTS-mode helper functions/macros and converts
serpent-avx to use the new facilities. The benefits are slightly improved
speed and reduced stack usage, since the use of a temporary IV array is
avoided.

tcrypt results, with Intel i5-2450M:

    size     enc     dec
    16B      1.00x   1.00x
    64B      1.00x   1.00x
    256B     1.04x   1.06x
    1024B    1.09x   1.09x
    8192B    1.10x   1.09x

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit a05248ed2d (parent d2049d8566)
--- a/arch/x86/crypto/glue_helper-asm-avx.S
+++ b/arch/x86/crypto/glue_helper-asm-avx.S
@@ -1,7 +1,7 @@
 /*
  * Shared glue code for 128bit block ciphers, AVX assembler macros
  *
- * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -89,3 +89,62 @@
 	vpxor (6*16)(src), x6, x6; \
 	vpxor (7*16)(src), x7, x7; \
 	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
+
+#define gf128mul_x_ble(iv, mask, tmp) \
+	vpsrad $31, iv, tmp; \
+	vpaddq iv, iv, iv; \
+	vpshufd $0x13, tmp, tmp; \
+	vpand mask, tmp, tmp; \
+	vpxor tmp, iv, iv;
+
+#define load_xts_8way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, t0, \
+		      t1, xts_gf128mul_and_shl1_mask) \
+	vmovdqa xts_gf128mul_and_shl1_mask, t0; \
+	\
+	/* load IV */ \
+	vmovdqu (iv), tiv; \
+	vpxor (0*16)(src), tiv, x0; \
+	vmovdqu tiv, (0*16)(dst); \
+	\
+	/* construct and store IVs, also xor with source */ \
+	gf128mul_x_ble(tiv, t0, t1); \
+	vpxor (1*16)(src), tiv, x1; \
+	vmovdqu tiv, (1*16)(dst); \
+	\
+	gf128mul_x_ble(tiv, t0, t1); \
+	vpxor (2*16)(src), tiv, x2; \
+	vmovdqu tiv, (2*16)(dst); \
+	\
+	gf128mul_x_ble(tiv, t0, t1); \
+	vpxor (3*16)(src), tiv, x3; \
+	vmovdqu tiv, (3*16)(dst); \
+	\
+	gf128mul_x_ble(tiv, t0, t1); \
+	vpxor (4*16)(src), tiv, x4; \
+	vmovdqu tiv, (4*16)(dst); \
+	\
+	gf128mul_x_ble(tiv, t0, t1); \
+	vpxor (5*16)(src), tiv, x5; \
+	vmovdqu tiv, (5*16)(dst); \
+	\
+	gf128mul_x_ble(tiv, t0, t1); \
+	vpxor (6*16)(src), tiv, x6; \
+	vmovdqu tiv, (6*16)(dst); \
+	\
+	gf128mul_x_ble(tiv, t0, t1); \
+	vpxor (7*16)(src), tiv, x7; \
+	vmovdqu tiv, (7*16)(dst); \
+	\
+	gf128mul_x_ble(tiv, t0, t1); \
+	vmovdqu tiv, (iv);
+
+#define store_xts_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
+	vpxor (0*16)(dst), x0, x0; \
+	vpxor (1*16)(dst), x1, x1; \
+	vpxor (2*16)(dst), x2, x2; \
+	vpxor (3*16)(dst), x3, x3; \
+	vpxor (4*16)(dst), x4, x4; \
+	vpxor (5*16)(dst), x5, x5; \
+	vpxor (6*16)(dst), x6, x6; \
+	vpxor (7*16)(dst), x7, x7; \
+	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
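The gf128mul_x_ble macro above advances the XTS tweak, i.e. multiplies it by
x (α) in GF(2¹²⁸) under the little-endian block convention: the 128-bit value
shifts left one bit, and the bit that falls out of position 127 folds back in
as 0x87 (the reduction polynomial x^128 + x^7 + x^2 + x + 1). Since vpaddq
only doubles each 64-bit lane, the vpsrad/vpshufd/vpand sequence manufactures
the two correction terms: the sign mask of the high qword becomes 0x87 in the
low byte, and the sign mask of the low qword becomes the carry into bit 64.
A scalar C sketch of the same operation (names are mine, not from the patch):

	#include <assert.h>
	#include <stdint.h>

	/* Multiply a 128-bit XTS tweak by x (alpha) in GF(2^128),
	 * little-endian block convention; scalar equivalent of the
	 * gf128mul_x_ble AVX macro.  t[0] is the low and t[1] the high
	 * 64-bit half of the tweak. */
	static void gf128mul_x_ble_scalar(uint64_t t[2])
	{
		/* 0x87 iff bit 127 is set -- what vpsrad+vpshufd+vpand compute */
		uint64_t fold = (uint64_t)((int64_t)t[1] >> 63) & 0x87;
		uint64_t carry = t[0] >> 63;	/* bit 63 crosses into bit 64 */

		t[1] = (t[1] << 1) ^ carry;
		t[0] = (t[0] << 1) ^ fold;
	}

	int main(void)
	{
		/* doubling x^127 must reduce to x^7 + x^2 + x + 1 = 0x87 */
		uint64_t t[2] = { 0, 0x8000000000000000ULL };

		gf128mul_x_ble_scalar(t);
		assert(t[0] == 0x87 && t[1] == 0);
		return 0;
	}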
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -1,7 +1,7 @@
 /*
  * Shared glue code for 128bit block ciphers
  *
- * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
  *
  * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
  *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
@@ -304,4 +304,99 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
 }
 EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
 
+static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+					    void *ctx,
+					    struct blkcipher_desc *desc,
+					    struct blkcipher_walk *walk)
+{
+	const unsigned int bsize = 128 / 8;
+	unsigned int nbytes = walk->nbytes;
+	u128 *src = (u128 *)walk->src.virt.addr;
+	u128 *dst = (u128 *)walk->dst.virt.addr;
+	unsigned int num_blocks, func_bytes;
+	unsigned int i;
+
+	/* Process multi-block batch */
+	for (i = 0; i < gctx->num_funcs; i++) {
+		num_blocks = gctx->funcs[i].num_blocks;
+		func_bytes = bsize * num_blocks;
+
+		if (nbytes >= func_bytes) {
+			do {
+				gctx->funcs[i].fn_u.xts(ctx, dst, src,
+							(le128 *)walk->iv);
+
+				src += num_blocks;
+				dst += num_blocks;
+				nbytes -= func_bytes;
+			} while (nbytes >= func_bytes);
+
+			if (nbytes < bsize)
+				goto done;
+		}
+	}
+
+done:
+	return nbytes;
+}
+
+/* for implementations implementing faster XTS IV generator */
+int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+			  struct blkcipher_desc *desc, struct scatterlist *dst,
+			  struct scatterlist *src, unsigned int nbytes,
+			  void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
+			  void *tweak_ctx, void *crypt_ctx)
+{
+	const unsigned int bsize = 128 / 8;
+	bool fpu_enabled = false;
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+
+	err = blkcipher_walk_virt(desc, &walk);
+	nbytes = walk.nbytes;
+	if (!nbytes)
+		return err;
+
+	/* set minimum length to bsize, for tweak_fn */
+	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+				     desc, fpu_enabled,
+				     nbytes < bsize ? bsize : nbytes);
+
+	/* calculate first value of T */
+	tweak_fn(tweak_ctx, walk.iv, walk.iv);
+
+	while (nbytes) {
+		nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
+
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+		nbytes = walk.nbytes;
+	}
+
+	glue_fpu_end(fpu_enabled);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
+
+void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
+			       common_glue_func_t fn)
+{
+	le128 ivblk = *iv;
+
+	/* generate next IV */
+	le128_gf128mul_x_ble(iv, &ivblk);
+
+	/* CC <- T xor C */
+	u128_xor(dst, src, (u128 *)&ivblk);
+
+	/* PP <- D(Key2,CC) */
+	fn(ctx, (u8 *)dst, (u8 *)dst);
+
+	/* P <- T xor PP */
+	u128_xor(dst, dst, (u128 *)&ivblk);
+}
+EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);
+
 MODULE_LICENSE("GPL");
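__glue_xts_crypt_128bit() walks the gctx->funcs table in order and greedily
consumes the request with the widest function that still fits, so the 8-way
AVX routine handles the bulk of a request and the 1-way fallback handles the
tail; the table must therefore be sorted by decreasing num_blocks. A
standalone sketch of the batching arithmetic (hypothetical 35-block request,
not kernel code):

	#include <stdio.h>

	int main(void)
	{
		const unsigned int bsize = 16;			/* 128-bit block */
		const unsigned int num_blocks[] = { 8, 1 };	/* widest first */
		unsigned int nbytes = 35 * bsize;		/* 35-block request */

		for (unsigned int i = 0; i < 2; i++) {
			unsigned int func_bytes = bsize * num_blocks[i];

			while (nbytes >= func_bytes) {
				printf("fn_u.xts called for %u block(s)\n",
				       num_blocks[i]);
				nbytes -= func_bytes;
			}
		}
		/* prints four 8-block calls, then three 1-block calls */
		return 0;
	}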
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -4,8 +4,7 @@
  * Copyright (C) 2012 Johannes Goetzfried
  *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
  *
- * Based on arch/x86/crypto/serpent-sse2-x86_64-asm_64.S by
- *  Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ * Copyright © 2011-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -34,6 +33,8 @@
 
 .Lbswap128_mask:
 	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+.Lxts_gf128mul_and_shl1_mask:
+	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
 
 .text
 
@@ -739,3 +740,43 @@ ENTRY(serpent_ctr_8way_avx)
 
 	ret;
 ENDPROC(serpent_ctr_8way_avx)
+
+ENTRY(serpent_xts_enc_8way_avx)
+	/* input:
+	 *	%rdi: ctx, CTX
+	 *	%rsi: dst
+	 *	%rdx: src
+	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
+	 */
+
+	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
+	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
+		      RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask);
+
+	call __serpent_enc_blk8_avx;
+
+	/* dst <= regs xor IVs(in dst) */
+	store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+	ret;
+ENDPROC(serpent_xts_enc_8way_avx)
+
+ENTRY(serpent_xts_dec_8way_avx)
+	/* input:
+	 *	%rdi: ctx, CTX
+	 *	%rsi: dst
+	 *	%rdx: src
+	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
+	 */
+
+	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
+	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
+		      RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask);
+
+	call __serpent_dec_blk8_avx;
+
+	/* dst <= regs xor IVs(in dst) */
+	store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+
+	ret;
+ENDPROC(serpent_xts_dec_8way_avx)
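The XTS entry points rely on load_xts_8way parking each computed tweak in the
destination buffer while xoring it into the plaintext registers;
store_xts_8way then xors the tweaks back out of dst after the cipher runs.
This is what eliminates the temporary IV array (and the stack usage)
mentioned in the commit message. A compilable C sketch of that dataflow
(u128x, xor128 and enc8 are stand-ins of mine, not the kernel API):

	#include <stdint.h>

	typedef struct { uint64_t lo, hi; } u128x;	/* stand-in for the kernel's u128 */

	static u128x xor128(u128x a, u128x b)
	{
		return (u128x){ a.lo ^ b.lo, a.hi ^ b.hi };
	}

	static void gf128mul_x_ble(u128x *t)		/* tweak *= alpha, as before */
	{
		uint64_t fold = (uint64_t)((int64_t)t->hi >> 63) & 0x87;
		uint64_t carry = t->lo >> 63;

		t->hi = (t->hi << 1) ^ carry;
		t->lo = (t->lo << 1) ^ fold;
	}

	/* Dataflow of load_xts_8way + cipher + store_xts_8way: tweaks are
	 * parked in dst while the cipher runs, so no temporary IV array is
	 * needed.  enc8 stands in for __serpent_enc_blk8_avx. */
	void xts_enc_8way_sketch(u128x dst[8], const u128x src[8], u128x *T,
				 void (*enc8)(u128x x[8]))
	{
		u128x x[8];

		for (int i = 0; i < 8; i++) {
			x[i] = xor128(src[i], *T);	/* PP = P xor T */
			dst[i] = *T;			/* park tweak in output buffer */
			gf128mul_x_ble(T);		/* next tweak */
		}
		enc8(x);				/* CC = E(Key1, PP) */
		for (int i = 0; i < 8; i++)
			dst[i] = xor128(x[i], dst[i]);	/* C = CC xor T */
	}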
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -4,8 +4,7 @@
  * Copyright (C) 2012 Johannes Goetzfried
  *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
  *
- * Glue code based on serpent_sse2_glue.c by:
- *  Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ * Copyright © 2011-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -53,6 +52,18 @@ static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 	u128_xor(dst, src, (u128 *)&ctrblk);
 }
 
+static void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+{
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
+				  GLUE_FUNC_CAST(__serpent_encrypt));
+}
+
+static void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+{
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
+				  GLUE_FUNC_CAST(__serpent_decrypt));
+}
+
 static const struct common_glue_ctx serpent_enc = {
 	.num_funcs = 2,
 	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
@@ -79,6 +90,19 @@ static const struct common_glue_ctx serpent_ctr = {
 	} }
 };
 
+static const struct common_glue_ctx serpent_enc_xts = {
+	.num_funcs = 2,
+	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
+
+	.funcs = { {
+		.num_blocks = SERPENT_PARALLEL_BLOCKS,
+		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
+	}, {
+		.num_blocks = 1,
+		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
+	} }
+};
+
 static const struct common_glue_ctx serpent_dec = {
 	.num_funcs = 2,
 	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
@@ -105,6 +129,19 @@ static const struct common_glue_ctx serpent_dec_cbc = {
 	} }
 };
 
+static const struct common_glue_ctx serpent_dec_xts = {
+	.num_funcs = 2,
+	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
+
+	.funcs = { {
+		.num_blocks = SERPENT_PARALLEL_BLOCKS,
+		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
+	}, {
+		.num_blocks = 1,
+		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
+	} }
+};
+
 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
@@ -299,54 +336,20 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
 	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	be128 buf[SERPENT_PARALLEL_BLOCKS];
-	struct crypt_priv crypt_ctx = {
-		.ctx = &ctx->crypt_ctx,
-		.fpu_enabled = false,
-	};
-	struct xts_crypt_req req = {
-		.tbuf = buf,
-		.tbuflen = sizeof(buf),
-
-		.tweak_ctx = &ctx->tweak_ctx,
-		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
-		.crypt_ctx = &crypt_ctx,
-		.crypt_fn = encrypt_callback,
-	};
-	int ret;
-
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	ret = xts_crypt(desc, dst, src, nbytes, &req);
-	serpent_fpu_end(crypt_ctx.fpu_enabled);
-
-	return ret;
+
+	return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
+				     XTS_TWEAK_CAST(__serpent_encrypt),
+				     &ctx->tweak_ctx, &ctx->crypt_ctx);
 }
 
 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
 	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	be128 buf[SERPENT_PARALLEL_BLOCKS];
-	struct crypt_priv crypt_ctx = {
-		.ctx = &ctx->crypt_ctx,
-		.fpu_enabled = false,
-	};
-	struct xts_crypt_req req = {
-		.tbuf = buf,
-		.tbuflen = sizeof(buf),
-
-		.tweak_ctx = &ctx->tweak_ctx,
-		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
-		.crypt_ctx = &crypt_ctx,
-		.crypt_fn = decrypt_callback,
-	};
-	int ret;
-
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	ret = xts_crypt(desc, dst, src, nbytes, &req);
-	serpent_fpu_end(crypt_ctx.fpu_enabled);
-
-	return ret;
+
+	return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes,
+				     XTS_TWEAK_CAST(__serpent_encrypt),
+				     &ctx->tweak_ctx, &ctx->crypt_ctx);
 }
 
 static struct crypto_alg serpent_algs[10] = { {
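Note that xts_decrypt() passes __serpent_encrypt as the tweak function: XTS
(IEEE P1619) always encrypts the sector tweak with the second key; only the
data blocks go through the decryption direction. Note also that the funcs[]
entries are listed in decreasing num_blocks order, as required by the greedy
loop in __glue_xts_crypt_128bit().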
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -14,10 +14,13 @@ typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
 typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
 typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
 				       le128 *iv);
+typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src,
+				       le128 *iv);
 
 #define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
 #define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
 #define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
+#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn))
 
 struct common_glue_func_entry {
 	unsigned int num_blocks; /* number of blocks that @fn will process */
@@ -25,6 +28,7 @@ struct common_glue_func_entry {
 		common_glue_func_t ecb;
 		common_glue_cbc_func_t cbc;
 		common_glue_ctr_func_t ctr;
+		common_glue_xts_func_t xts;
 	} fn_u;
 };
 
@@ -96,6 +100,16 @@ static inline void le128_inc(le128 *i)
 	i->b = cpu_to_le64(b);
 }
 
+static inline void le128_gf128mul_x_ble(le128 *dst, const le128 *src)
+{
+	u64 a = le64_to_cpu(src->a);
+	u64 b = le64_to_cpu(src->b);
+	u64 _tt = ((s64)a >> 63) & 0x87;
+
+	dst->a = cpu_to_le64((a << 1) ^ (b >> 63));
+	dst->b = cpu_to_le64((b << 1) ^ _tt);
+}
+
 extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
 				 struct blkcipher_desc *desc,
 				 struct scatterlist *dst,
@@ -118,4 +132,14 @@ extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
 				 struct scatterlist *dst,
 				 struct scatterlist *src, unsigned int nbytes);
 
+extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+				 struct blkcipher_desc *desc,
+				 struct scatterlist *dst,
+				 struct scatterlist *src, unsigned int nbytes,
+				 common_glue_func_t tweak_fn, void *tweak_ctx,
+				 void *crypt_ctx);
+
+extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
+				      le128 *iv, common_glue_func_t fn);
+
 #endif /* _CRYPTO_GLUE_HELPER_H */
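In le128_gf128mul_x_ble(), .a is the most-significant and .b the
least-significant 64-bit half of the little-endian tweak, so the (b >> 63)
term is the carry across the lane boundary and _tt folds the reduction into
the low byte, mirroring the .Lxts_gf128mul_and_shl1_mask constant (0x87 at
byte 0, 1 at byte 8). A byte-level standalone check of the same doubling
(hypothetical test, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t blk[16] = { 0 }, out[16];
		uint8_t carry = 0;

		blk[15] = 0x80;			/* bit 127 of the little-endian block */

		for (int i = 0; i < 16; i++) {	/* shift left, byte 0 upward */
			out[i] = (uint8_t)((blk[i] << 1) | carry);
			carry = blk[i] >> 7;
		}
		if (carry)			/* bit fell off bit 127: fold x^128 */
			out[0] ^= 0x87;

		assert(out[0] == 0x87);
		for (int i = 1; i < 16; i++)
			assert(out[i] == 0);
		return 0;
	}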
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ b/arch/x86/include/asm/crypto/serpent-avx.h
@@ -16,4 +16,9 @@ asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
 asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
 				     const u8 *src, le128 *iv);
 
+asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+					 const u8 *src, le128 *iv);
+asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+					 const u8 *src, le128 *iv);
+
 #endif