mirror of
https://github.com/torvalds/linux.git
synced 2024-12-27 21:33:00 +00:00
crypto: gf128mul - switch gf128mul_x_ble to le128
Currently, gf128mul_x_ble works with pointers to be128, even though it actually interprets the words as little-endian. Consequently, it uses cpu_to_le64/le64_to_cpu on fields of type __be64, which is incorrect. This patch fixes that by changing the function to accept pointers to le128 and updating all users accordingly. Signed-off-by: Ondrej Mosnacek <omosnacek@gmail.com> Reviewed-by: Eric Biggers <ebiggers@google.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent
acb9b159c7
commit
e55318c84f
@ -1522,7 +1522,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
|||||||
struct scatterlist *src, unsigned int nbytes)
|
struct scatterlist *src, unsigned int nbytes)
|
||||||
{
|
{
|
||||||
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||||
be128 buf[2 * 4];
|
le128 buf[2 * 4];
|
||||||
struct xts_crypt_req req = {
|
struct xts_crypt_req req = {
|
||||||
.tbuf = buf,
|
.tbuf = buf,
|
||||||
.tbuflen = sizeof(buf),
|
.tbuflen = sizeof(buf),
|
||||||
@ -1540,7 +1540,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
|||||||
struct scatterlist *src, unsigned int nbytes)
|
struct scatterlist *src, unsigned int nbytes)
|
||||||
{
|
{
|
||||||
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||||
be128 buf[2 * 4];
|
le128 buf[2 * 4];
|
||||||
struct xts_crypt_req req = {
|
struct xts_crypt_req req = {
|
||||||
.tbuf = buf,
|
.tbuf = buf,
|
||||||
.tbuflen = sizeof(buf),
|
.tbuflen = sizeof(buf),
|
||||||
|
@ -328,7 +328,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
|||||||
struct scatterlist *src, unsigned int nbytes)
|
struct scatterlist *src, unsigned int nbytes)
|
||||||
{
|
{
|
||||||
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||||
be128 buf[SERPENT_PARALLEL_BLOCKS];
|
le128 buf[SERPENT_PARALLEL_BLOCKS];
|
||||||
struct crypt_priv crypt_ctx = {
|
struct crypt_priv crypt_ctx = {
|
||||||
.ctx = &ctx->crypt_ctx,
|
.ctx = &ctx->crypt_ctx,
|
||||||
.fpu_enabled = false,
|
.fpu_enabled = false,
|
||||||
@ -355,7 +355,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
|||||||
struct scatterlist *src, unsigned int nbytes)
|
struct scatterlist *src, unsigned int nbytes)
|
||||||
{
|
{
|
||||||
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||||
be128 buf[SERPENT_PARALLEL_BLOCKS];
|
le128 buf[SERPENT_PARALLEL_BLOCKS];
|
||||||
struct crypt_priv crypt_ctx = {
|
struct crypt_priv crypt_ctx = {
|
||||||
.ctx = &ctx->crypt_ctx,
|
.ctx = &ctx->crypt_ctx,
|
||||||
.fpu_enabled = false,
|
.fpu_enabled = false,
|
||||||
|
@ -296,7 +296,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
|||||||
struct scatterlist *src, unsigned int nbytes)
|
struct scatterlist *src, unsigned int nbytes)
|
||||||
{
|
{
|
||||||
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||||
be128 buf[3];
|
le128 buf[3];
|
||||||
struct xts_crypt_req req = {
|
struct xts_crypt_req req = {
|
||||||
.tbuf = buf,
|
.tbuf = buf,
|
||||||
.tbuflen = sizeof(buf),
|
.tbuflen = sizeof(buf),
|
||||||
@ -314,7 +314,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
|||||||
struct scatterlist *src, unsigned int nbytes)
|
struct scatterlist *src, unsigned int nbytes)
|
||||||
{
|
{
|
||||||
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||||
be128 buf[3];
|
le128 buf[3];
|
||||||
struct xts_crypt_req req = {
|
struct xts_crypt_req req = {
|
||||||
.tbuf = buf,
|
.tbuf = buf,
|
||||||
.tbuflen = sizeof(buf),
|
.tbuflen = sizeof(buf),
|
||||||
|
38
crypto/xts.c
38
crypto/xts.c
@ -39,11 +39,11 @@ struct xts_instance_ctx {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct rctx {
|
struct rctx {
|
||||||
be128 buf[XTS_BUFFER_SIZE / sizeof(be128)];
|
le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
|
||||||
|
|
||||||
be128 t;
|
le128 t;
|
||||||
|
|
||||||
be128 *ext;
|
le128 *ext;
|
||||||
|
|
||||||
struct scatterlist srcbuf[2];
|
struct scatterlist srcbuf[2];
|
||||||
struct scatterlist dstbuf[2];
|
struct scatterlist dstbuf[2];
|
||||||
@ -99,7 +99,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
|
|||||||
static int post_crypt(struct skcipher_request *req)
|
static int post_crypt(struct skcipher_request *req)
|
||||||
{
|
{
|
||||||
struct rctx *rctx = skcipher_request_ctx(req);
|
struct rctx *rctx = skcipher_request_ctx(req);
|
||||||
be128 *buf = rctx->ext ?: rctx->buf;
|
le128 *buf = rctx->ext ?: rctx->buf;
|
||||||
struct skcipher_request *subreq;
|
struct skcipher_request *subreq;
|
||||||
const int bs = XTS_BLOCK_SIZE;
|
const int bs = XTS_BLOCK_SIZE;
|
||||||
struct skcipher_walk w;
|
struct skcipher_walk w;
|
||||||
@ -112,12 +112,12 @@ static int post_crypt(struct skcipher_request *req)
|
|||||||
|
|
||||||
while (w.nbytes) {
|
while (w.nbytes) {
|
||||||
unsigned int avail = w.nbytes;
|
unsigned int avail = w.nbytes;
|
||||||
be128 *wdst;
|
le128 *wdst;
|
||||||
|
|
||||||
wdst = w.dst.virt.addr;
|
wdst = w.dst.virt.addr;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
be128_xor(wdst, buf++, wdst);
|
le128_xor(wdst, buf++, wdst);
|
||||||
wdst++;
|
wdst++;
|
||||||
} while ((avail -= bs) >= bs);
|
} while ((avail -= bs) >= bs);
|
||||||
|
|
||||||
@ -150,7 +150,7 @@ out:
|
|||||||
static int pre_crypt(struct skcipher_request *req)
|
static int pre_crypt(struct skcipher_request *req)
|
||||||
{
|
{
|
||||||
struct rctx *rctx = skcipher_request_ctx(req);
|
struct rctx *rctx = skcipher_request_ctx(req);
|
||||||
be128 *buf = rctx->ext ?: rctx->buf;
|
le128 *buf = rctx->ext ?: rctx->buf;
|
||||||
struct skcipher_request *subreq;
|
struct skcipher_request *subreq;
|
||||||
const int bs = XTS_BLOCK_SIZE;
|
const int bs = XTS_BLOCK_SIZE;
|
||||||
struct skcipher_walk w;
|
struct skcipher_walk w;
|
||||||
@ -174,15 +174,15 @@ static int pre_crypt(struct skcipher_request *req)
|
|||||||
|
|
||||||
while (w.nbytes) {
|
while (w.nbytes) {
|
||||||
unsigned int avail = w.nbytes;
|
unsigned int avail = w.nbytes;
|
||||||
be128 *wsrc;
|
le128 *wsrc;
|
||||||
be128 *wdst;
|
le128 *wdst;
|
||||||
|
|
||||||
wsrc = w.src.virt.addr;
|
wsrc = w.src.virt.addr;
|
||||||
wdst = w.dst.virt.addr;
|
wdst = w.dst.virt.addr;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
*buf++ = rctx->t;
|
*buf++ = rctx->t;
|
||||||
be128_xor(wdst++, &rctx->t, wsrc++);
|
le128_xor(wdst++, &rctx->t, wsrc++);
|
||||||
gf128mul_x_ble(&rctx->t, &rctx->t);
|
gf128mul_x_ble(&rctx->t, &rctx->t);
|
||||||
} while ((avail -= bs) >= bs);
|
} while ((avail -= bs) >= bs);
|
||||||
|
|
||||||
@ -353,8 +353,8 @@ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
|
|||||||
const unsigned int max_blks = req->tbuflen / bsize;
|
const unsigned int max_blks = req->tbuflen / bsize;
|
||||||
struct blkcipher_walk walk;
|
struct blkcipher_walk walk;
|
||||||
unsigned int nblocks;
|
unsigned int nblocks;
|
||||||
be128 *src, *dst, *t;
|
le128 *src, *dst, *t;
|
||||||
be128 *t_buf = req->tbuf;
|
le128 *t_buf = req->tbuf;
|
||||||
int err, i;
|
int err, i;
|
||||||
|
|
||||||
BUG_ON(max_blks < 1);
|
BUG_ON(max_blks < 1);
|
||||||
@ -367,8 +367,8 @@ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
|
|||||||
return err;
|
return err;
|
||||||
|
|
||||||
nblocks = min(nbytes / bsize, max_blks);
|
nblocks = min(nbytes / bsize, max_blks);
|
||||||
src = (be128 *)walk.src.virt.addr;
|
src = (le128 *)walk.src.virt.addr;
|
||||||
dst = (be128 *)walk.dst.virt.addr;
|
dst = (le128 *)walk.dst.virt.addr;
|
||||||
|
|
||||||
/* calculate first value of T */
|
/* calculate first value of T */
|
||||||
req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
|
req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
|
||||||
@ -384,7 +384,7 @@ first:
|
|||||||
t = &t_buf[i];
|
t = &t_buf[i];
|
||||||
|
|
||||||
/* PP <- T xor P */
|
/* PP <- T xor P */
|
||||||
be128_xor(dst + i, t, src + i);
|
le128_xor(dst + i, t, src + i);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* CC <- E(Key2,PP) */
|
/* CC <- E(Key2,PP) */
|
||||||
@ -393,7 +393,7 @@ first:
|
|||||||
|
|
||||||
/* C <- T xor CC */
|
/* C <- T xor CC */
|
||||||
for (i = 0; i < nblocks; i++)
|
for (i = 0; i < nblocks; i++)
|
||||||
be128_xor(dst + i, dst + i, &t_buf[i]);
|
le128_xor(dst + i, dst + i, &t_buf[i]);
|
||||||
|
|
||||||
src += nblocks;
|
src += nblocks;
|
||||||
dst += nblocks;
|
dst += nblocks;
|
||||||
@ -401,7 +401,7 @@ first:
|
|||||||
nblocks = min(nbytes / bsize, max_blks);
|
nblocks = min(nbytes / bsize, max_blks);
|
||||||
} while (nblocks > 0);
|
} while (nblocks > 0);
|
||||||
|
|
||||||
*(be128 *)walk.iv = *t;
|
*(le128 *)walk.iv = *t;
|
||||||
|
|
||||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||||
nbytes = walk.nbytes;
|
nbytes = walk.nbytes;
|
||||||
@ -409,8 +409,8 @@ first:
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
nblocks = min(nbytes / bsize, max_blks);
|
nblocks = min(nbytes / bsize, max_blks);
|
||||||
src = (be128 *)walk.src.virt.addr;
|
src = (le128 *)walk.src.virt.addr;
|
||||||
dst = (be128 *)walk.dst.virt.addr;
|
dst = (le128 *)walk.dst.virt.addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
|
@ -205,16 +205,16 @@ static inline void gf128mul_x_bbe(be128 *r, const be128 *x)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* needed by XTS */
|
/* needed by XTS */
|
||||||
static inline void gf128mul_x_ble(be128 *r, const be128 *x)
|
static inline void gf128mul_x_ble(le128 *r, const le128 *x)
|
||||||
{
|
{
|
||||||
u64 a = le64_to_cpu(x->a);
|
u64 a = le64_to_cpu(x->a);
|
||||||
u64 b = le64_to_cpu(x->b);
|
u64 b = le64_to_cpu(x->b);
|
||||||
|
|
||||||
/* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
|
/* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
|
||||||
u64 _tt = gf128mul_mask_from_bit(b, 63) & 0x87;
|
u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87;
|
||||||
|
|
||||||
r->a = cpu_to_le64((a << 1) ^ _tt);
|
r->a = cpu_to_le64((a << 1) | (b >> 63));
|
||||||
r->b = cpu_to_le64((b << 1) | (a >> 63));
|
r->b = cpu_to_le64((b << 1) ^ _tt);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* 4k table optimization */
|
/* 4k table optimization */
|
||||||
|
@ -11,7 +11,7 @@ struct blkcipher_desc;
|
|||||||
#define XTS_BLOCK_SIZE 16
|
#define XTS_BLOCK_SIZE 16
|
||||||
|
|
||||||
struct xts_crypt_req {
|
struct xts_crypt_req {
|
||||||
be128 *tbuf;
|
le128 *tbuf;
|
||||||
unsigned int tbuflen;
|
unsigned int tbuflen;
|
||||||
|
|
||||||
void *tweak_ctx;
|
void *tweak_ctx;
|
||||||
|
Loading…
Reference in New Issue
Block a user