Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (31 commits)
  crypto: aes_generic - Fix checkpatch errors
  crypto: fcrypt - Fix checkpatch errors
  crypto: ecb - Fix checkpatch errors
  crypto: des_generic - Fix checkpatch errors
  crypto: deflate - Fix checkpatch errors
  crypto: crypto_null - Fix checkpatch errors
  crypto: cipher - Fix checkpatch errors
  crypto: crc32 - Fix checkpatch errors
  crypto: compress - Fix checkpatch errors
  crypto: cast6 - Fix checkpatch errors
  crypto: cast5 - Fix checkpatch errors
  crypto: camellia - Fix checkpatch errors
  crypto: authenc - Fix checkpatch errors
  crypto: api - Fix checkpatch errors
  crypto: anubis - Fix checkpatch errors
  crypto: algapi - Fix checkpatch errors
  crypto: blowfish - Fix checkpatch errors
  crypto: aead - Fix checkpatch errors
  crypto: ablkcipher - Fix checkpatch errors
  crypto: pcrypt - call the complete function on error
  ...
commit 37d4008484
@@ -86,11 +86,19 @@ static struct amba_device cpu8815_amba_gpio[] = {
 	},
 };

+static struct amba_device cpu8815_amba_rng = {
+	.dev = {
+		.init_name = "rng",
+	},
+	__MEM_4K_RESOURCE(NOMADIK_RNG_BASE),
+};
+
 static struct amba_device *amba_devs[] __initdata = {
 	cpu8815_amba_gpio + 0,
 	cpu8815_amba_gpio + 1,
 	cpu8815_amba_gpio + 2,
 	cpu8815_amba_gpio + 3,
+	&cpu8815_amba_rng
 };

 static int __init cpu8815_init(void)
@@ -78,14 +78,14 @@ static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 	int ret;

-	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
 			CRYPTO_TFM_REQ_MASK);

 	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
 	if (ret) {
 		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
+		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
 			CRYPTO_TFM_RES_MASK);
 	}
 	return ret;
@@ -114,6 +114,16 @@ config CRYPTO_NULL
 	help
 	  These are 'Null' algorithms, used by IPsec, which do nothing.

+config CRYPTO_PCRYPT
+	tristate "Parallel crypto engine (EXPERIMENTAL)"
+	depends on SMP && EXPERIMENTAL
+	select PADATA
+	select CRYPTO_MANAGER
+	select CRYPTO_AEAD
+	help
+	  This converts an arbitrary crypto algorithm into a parallel
+	  algorithm that executes in kernel threads.
+
 config CRYPTO_WORKQUEUE
 	tristate
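The help text above summarizes the new template. As a quick illustration (not part of the diff, a hedged sketch only), a kernel-side user could request a pcrypt-wrapped AEAD by name; the inner algorithm "gcm(aes)" is an assumed example:

/* Hedged sketch, not from this commit: wrap an existing AEAD in pcrypt. */
#include <linux/crypto.h>
#include <linux/err.h>

static struct crypto_aead *alloc_parallel_gcm(void)
{
	/* "pcrypt(...)" is the template name registered by crypto/pcrypt.c */
	struct crypto_aead *tfm = crypto_alloc_aead("pcrypt(gcm(aes))", 0, 0);

	if (IS_ERR(tfm))
		return NULL;	/* a caller could also propagate PTR_ERR(tfm) */
	return tfm;
}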
@@ -56,6 +56,7 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
 obj-$(CONFIG_CRYPTO_CTR) += ctr.o
 obj-$(CONFIG_CRYPTO_GCM) += gcm.o
 obj-$(CONFIG_CRYPTO_CCM) += ccm.o
+obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
 obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
 obj-$(CONFIG_CRYPTO_DES) += des_generic.o
 obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
@@ -469,8 +469,7 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 	u32 kappa[ANUBIS_MAX_N];
 	u32 inter[ANUBIS_MAX_N];

-	switch (key_len)
-	{
+	switch (key_len) {
 	case 16: case 20: case 24: case 28:
 	case 32: case 36: case 40:
 		break;
@@ -530,24 +529,25 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 		/*
 		 * compute kappa^{r+1} from kappa^r:
 		 */
-		if (r == R) {
+		if (r == R)
 			break;
-		}
 		for (i = 0; i < N; i++) {
 			int j = i;
 			inter[i] = T0[(kappa[j--] >> 24)       ];
-			if (j < 0) j = N - 1;
+			if (j < 0)
+				j = N - 1;
 			inter[i] ^= T1[(kappa[j--] >> 16) & 0xff];
-			if (j < 0) j = N - 1;
+			if (j < 0)
+				j = N - 1;
 			inter[i] ^= T2[(kappa[j--] >>  8) & 0xff];
-			if (j < 0) j = N - 1;
+			if (j < 0)
+				j = N - 1;
 			inter[i] ^= T3[(kappa[j  ]      ) & 0xff];
 		}
 		kappa[0] = inter[0] ^ rc[r];
-		for (i = 1; i < N; i++) {
+		for (i = 1; i < N; i++)
 			kappa[i] = inter[i];
-		}
 	}

 	/*
 	 * generate inverse key schedule: K'^0 = K^R, K'^R =
@@ -318,7 +318,6 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)

 	default:
 		BUG();
-
 	}
 }
@@ -369,7 +369,8 @@ static const u8 Tr[4][8] = {
 };

 /* forward octave */
-static void W(u32 *key, unsigned int i) {
+static void W(u32 *key, unsigned int i)
+{
 	u32 I;
 	key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
 	key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
@@ -407,8 +408,6 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 	key[6] = be32_to_cpu(p_key[6]);	/* G */
 	key[7] = be32_to_cpu(p_key[7]);	/* H */

-
-
 	for (i = 0; i < 12; i++) {
 		W(key, 2 * i);
 		W(key, 2 * i + 1);
@@ -428,7 +427,8 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 }

 /*forward quad round*/
-static void Q (u32 * block, u8 * Kr, u32 * Km) {
+static void Q(u32 *block, u8 *Kr, u32 *Km)
+{
 	u32 I;
 	block[2] ^= F1(block[3], Kr[0], Km[0]);
 	block[1] ^= F2(block[2], Kr[1], Km[1]);
@@ -437,7 +437,8 @@ static void Q (u32 * block, u8 * Kr, u32 * Km) {
 }

 /*reverse quad round*/
-static void QBAR (u32 * block, u8 * Kr, u32 * Km) {
+static void QBAR(u32 *block, u8 *Kr, u32 *Km)
+{
 	u32 I;
 	block[3] ^= F1(block[0], Kr[3], Km[3]);
 	block[0] ^= F3(block[1], Kr[2], Km[2]);
@@ -478,7 +479,8 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
 	dst[3] = cpu_to_be32(block[3]);
 }

-static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) {
+static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
+{
 	struct cast6_ctx *c = crypto_tfm_ctx(tfm);
 	const __be32 *src = (const __be32 *)inbuf;
 	__be32 *dst = (__be32 *)outbuf;
@@ -869,8 +869,7 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,

 	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
 		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY))
-	{
+	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
crypto/gcm.c | 287
@@ -37,6 +37,19 @@ struct crypto_rfc4106_ctx {
 	u8 nonce[4];
 };

+struct crypto_rfc4543_ctx {
+	struct crypto_aead *child;
+	u8 nonce[4];
+};
+
+struct crypto_rfc4543_req_ctx {
+	u8 auth_tag[16];
+	struct scatterlist cipher[1];
+	struct scatterlist payload[2];
+	struct scatterlist assoc[2];
+	struct aead_request subreq;
+};
+
 struct crypto_gcm_ghash_ctx {
 	unsigned int cryptlen;
 	struct scatterlist *src;
@@ -1047,6 +1060,272 @@ static struct crypto_template crypto_rfc4106_tmpl = {
 	.module = THIS_MODULE,
 };

+static inline struct crypto_rfc4543_req_ctx *crypto_rfc4543_reqctx(
+	struct aead_request *req)
+{
+	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
+
+	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
+}
+
+static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
+				 unsigned int keylen)
+{
+	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
+	struct crypto_aead *child = ctx->child;
+	int err;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+	keylen -= 4;
+	memcpy(ctx->nonce, key + keylen, 4);
+
+	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
+				     CRYPTO_TFM_REQ_MASK);
+	err = crypto_aead_setkey(child, key, keylen);
+	crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
+				      CRYPTO_TFM_RES_MASK);
+
+	return err;
+}
+
+static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
+				      unsigned int authsize)
+{
+	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
+
+	if (authsize != 16)
+		return -EINVAL;
+
+	return crypto_aead_setauthsize(ctx->child, authsize);
+}
+
+/* this is the same as crypto_authenc_chain */
+static void crypto_rfc4543_chain(struct scatterlist *head,
+				 struct scatterlist *sg, int chain)
+{
+	if (chain) {
+		head->length += sg->length;
+		sg = scatterwalk_sg_next(sg);
+	}
+
+	if (sg)
+		scatterwalk_sg_chain(head, 2, sg);
+	else
+		sg_mark_end(head);
+}
+
+static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
+						 int enc)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
+	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
+	struct aead_request *subreq = &rctx->subreq;
+	struct scatterlist *dst = req->dst;
+	struct scatterlist *cipher = rctx->cipher;
+	struct scatterlist *payload = rctx->payload;
+	struct scatterlist *assoc = rctx->assoc;
+	unsigned int authsize = crypto_aead_authsize(aead);
+	unsigned int assoclen = req->assoclen;
+	struct page *dstp;
+	u8 *vdst;
+	u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
+			   crypto_aead_alignmask(ctx->child) + 1);
+
+	memcpy(iv, ctx->nonce, 4);
+	memcpy(iv + 4, req->iv, 8);
+
+	/* construct cipher/plaintext */
+	if (enc)
+		memset(rctx->auth_tag, 0, authsize);
+	else
+		scatterwalk_map_and_copy(rctx->auth_tag, dst,
+					 req->cryptlen - authsize,
+					 authsize, 0);
+
+	sg_init_one(cipher, rctx->auth_tag, authsize);
+
+	/* construct the aad */
+	dstp = sg_page(dst);
+	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
+
+	sg_init_table(payload, 2);
+	sg_set_buf(payload, req->iv, 8);
+	crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8);
+	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
+
+	sg_init_table(assoc, 2);
+	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
+		    req->assoc->offset);
+	crypto_rfc4543_chain(assoc, payload, 0);
+
+	aead_request_set_tfm(subreq, ctx->child);
+	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
+				  req->base.data);
+	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
+	aead_request_set_assoc(subreq, assoc, assoclen);
+
+	return subreq;
+}
+
+static int crypto_rfc4543_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
+	struct aead_request *subreq;
+	int err;
+
+	subreq = crypto_rfc4543_crypt(req, 1);
+	err = crypto_aead_encrypt(subreq);
+	if (err)
+		return err;
+
+	scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen,
+				 crypto_aead_authsize(aead), 1);
+
+	return 0;
+}
+
+static int crypto_rfc4543_decrypt(struct aead_request *req)
+{
+	req = crypto_rfc4543_crypt(req, 0);
+
+	return crypto_aead_decrypt(req);
+}
+
+static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
+	struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aead *aead;
+	unsigned long align;
+
+	aead = crypto_spawn_aead(spawn);
+	if (IS_ERR(aead))
+		return PTR_ERR(aead);
+
+	ctx->child = aead;
+
+	align = crypto_aead_alignmask(aead);
+	align &= ~(crypto_tfm_ctx_alignment() - 1);
+	tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) +
+				ALIGN(crypto_aead_reqsize(aead),
+				      crypto_tfm_ctx_alignment()) +
+				align + 16;
+
+	return 0;
+}
+
+static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_aead(ctx->child);
+}
+
+static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb)
+{
+	struct crypto_attr_type *algt;
+	struct crypto_instance *inst;
+	struct crypto_aead_spawn *spawn;
+	struct crypto_alg *alg;
+	const char *ccm_name;
+	int err;
+
+	algt = crypto_get_attr_type(tb);
+	err = PTR_ERR(algt);
+	if (IS_ERR(algt))
+		return ERR_PTR(err);
+
+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+		return ERR_PTR(-EINVAL);
+
+	ccm_name = crypto_attr_alg_name(tb[1]);
+	err = PTR_ERR(ccm_name);
+	if (IS_ERR(ccm_name))
+		return ERR_PTR(err);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return ERR_PTR(-ENOMEM);
+
+	spawn = crypto_instance_ctx(inst);
+	crypto_set_aead_spawn(spawn, inst);
+	err = crypto_grab_aead(spawn, ccm_name, 0,
+			       crypto_requires_sync(algt->type, algt->mask));
+	if (err)
+		goto out_free_inst;
+
+	alg = crypto_aead_spawn_alg(spawn);
+
+	err = -EINVAL;
+
+	/* We only support 16-byte blocks. */
+	if (alg->cra_aead.ivsize != 16)
+		goto out_drop_alg;
+
+	/* Not a stream cipher? */
+	if (alg->cra_blocksize != 1)
+		goto out_drop_alg;
+
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "rfc4543(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
+	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+		     "rfc4543(%s)", alg->cra_driver_name) >=
+	    CRYPTO_MAX_ALG_NAME)
+		goto out_drop_alg;
+
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
+	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.cra_priority = alg->cra_priority;
+	inst->alg.cra_blocksize = 1;
+	inst->alg.cra_alignmask = alg->cra_alignmask;
+	inst->alg.cra_type = &crypto_nivaead_type;
+
+	inst->alg.cra_aead.ivsize = 8;
+	inst->alg.cra_aead.maxauthsize = 16;
+
+	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
+
+	inst->alg.cra_init = crypto_rfc4543_init_tfm;
+	inst->alg.cra_exit = crypto_rfc4543_exit_tfm;
+
+	inst->alg.cra_aead.setkey = crypto_rfc4543_setkey;
+	inst->alg.cra_aead.setauthsize = crypto_rfc4543_setauthsize;
+	inst->alg.cra_aead.encrypt = crypto_rfc4543_encrypt;
+	inst->alg.cra_aead.decrypt = crypto_rfc4543_decrypt;
+
+	inst->alg.cra_aead.geniv = "seqiv";
+
+out:
+	return inst;
+
+out_drop_alg:
+	crypto_drop_aead(spawn);
+out_free_inst:
+	kfree(inst);
+	inst = ERR_PTR(err);
+	goto out;
+}
+
+static void crypto_rfc4543_free(struct crypto_instance *inst)
+{
+	crypto_drop_spawn(crypto_instance_ctx(inst));
+	kfree(inst);
+}
+
+static struct crypto_template crypto_rfc4543_tmpl = {
+	.name = "rfc4543",
+	.alloc = crypto_rfc4543_alloc,
+	.free = crypto_rfc4543_free,
+	.module = THIS_MODULE,
+};
+
 static int __init crypto_gcm_module_init(void)
 {
 	int err;
@@ -1067,8 +1346,14 @@ static int __init crypto_gcm_module_init(void)
 	if (err)
 		goto out_undo_gcm;

+	err = crypto_register_template(&crypto_rfc4543_tmpl);
+	if (err)
+		goto out_undo_rfc4106;
+
 	return 0;

+out_undo_rfc4106:
+	crypto_unregister_template(&crypto_rfc4106_tmpl);
 out_undo_gcm:
 	crypto_unregister_template(&crypto_gcm_tmpl);
 out_undo_base:
@@ -1081,6 +1366,7 @@ out:
 static void __exit crypto_gcm_module_exit(void)
 {
 	kfree(gcm_zeroes);
+	crypto_unregister_template(&crypto_rfc4543_tmpl);
 	crypto_unregister_template(&crypto_rfc4106_tmpl);
 	crypto_unregister_template(&crypto_gcm_tmpl);
 	crypto_unregister_template(&crypto_gcm_base_tmpl);
@@ -1094,3 +1380,4 @@ MODULE_DESCRIPTION("Galois/Counter Mode");
 MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
 MODULE_ALIAS("gcm_base");
 MODULE_ALIAS("rfc4106");
+MODULE_ALIAS("rfc4543");
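With the rfc4543 template registered above, GCM can be driven as a pure authenticator (AES-GMAC). A hedged usage sketch, not from the commit: per crypto_rfc4543_setkey the trailing 4 key bytes become the nonce salt, and per crypto_rfc4543_setauthsize only a 16-byte tag is accepted. The key material below is a placeholder.

/* Hedged sketch: allocate and key the rfc4543 (GMAC) wrapper. */
#include <linux/crypto.h>
#include <linux/err.h>

static int try_gmac(void)
{
	/* 16-byte AES key followed by the 4-byte salt (the keylen - 4 rule) */
	static const u8 key[20];	/* placeholder key material */
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("rfc4543(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);	/* only 16 is accepted */

	crypto_free_aead(tfm);
	return err;
}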
crypto/md5.c | 40
@@ -16,17 +16,13 @@
  *
  */
 #include <crypto/internal/hash.h>
+#include <crypto/md5.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>

-#define MD5_DIGEST_SIZE		16
-#define MD5_HMAC_BLOCK_SIZE	64
-#define MD5_BLOCK_WORDS		16
-#define MD5_HASH_WORDS		4
-
 #define F1(x, y, z) (z ^ (x & (y ^ z)))
 #define F2(x, y, z) F1(z, x, y)
 #define F3(x, y, z) (x ^ y ^ z)
@@ -35,12 +31,6 @@
 #define MD5STEP(f, w, x, y, z, in, s) \
 	(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)

-struct md5_ctx {
-	u32 hash[MD5_HASH_WORDS];
-	u32 block[MD5_BLOCK_WORDS];
-	u64 byte_count;
-};
-
 static void md5_transform(u32 *hash, u32 const *in)
 {
 	u32 a, b, c, d;
@@ -141,7 +131,7 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
 	}
 }

-static inline void md5_transform_helper(struct md5_ctx *ctx)
+static inline void md5_transform_helper(struct md5_state *ctx)
 {
 	le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
 	md5_transform(ctx->hash, ctx->block);
@@ -149,7 +139,7 @@ static inline void md5_transform_helper(struct md5_ctx *ctx)

 static int md5_init(struct shash_desc *desc)
 {
-	struct md5_ctx *mctx = shash_desc_ctx(desc);
+	struct md5_state *mctx = shash_desc_ctx(desc);

 	mctx->hash[0] = 0x67452301;
 	mctx->hash[1] = 0xefcdab89;
@@ -162,7 +152,7 @@ static int md5_init(struct shash_desc *desc)

 static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 {
-	struct md5_ctx *mctx = shash_desc_ctx(desc);
+	struct md5_state *mctx = shash_desc_ctx(desc);
 	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);

 	mctx->byte_count += len;
@@ -194,7 +184,7 @@ static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)

 static int md5_final(struct shash_desc *desc, u8 *out)
 {
-	struct md5_ctx *mctx = shash_desc_ctx(desc);
+	struct md5_state *mctx = shash_desc_ctx(desc);
 	const unsigned int offset = mctx->byte_count & 0x3f;
 	char *p = (char *)mctx->block + offset;
 	int padding = 56 - (offset + 1);
@@ -220,12 +210,30 @@ static int md5_final(struct shash_desc *desc, u8 *out)
 	return 0;
 }

+static int md5_export(struct shash_desc *desc, void *out)
+{
+	struct md5_state *ctx = shash_desc_ctx(desc);
+
+	memcpy(out, ctx, sizeof(*ctx));
+	return 0;
+}
+
+static int md5_import(struct shash_desc *desc, const void *in)
+{
+	struct md5_state *ctx = shash_desc_ctx(desc);
+
+	memcpy(ctx, in, sizeof(*ctx));
+	return 0;
+}
+
 static struct shash_alg alg = {
 	.digestsize	=	MD5_DIGEST_SIZE,
 	.init		=	md5_init,
 	.update		=	md5_update,
 	.final		=	md5_final,
-	.descsize	=	sizeof(struct md5_ctx),
+	.export		=	md5_export,
+	.import		=	md5_import,
+	.descsize	=	sizeof(struct md5_state),
 	.base		=	{
 		.cra_name	=	"md5",
 		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
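The new md5_export/md5_import pair makes the partial state (struct md5_state) round-trippable. A hedged sketch, not from the commit, of checkpointing a digest mid-stream through the generic shash interface; the helper name and two-part input are illustrative:

/* Hedged sketch: checkpoint and resume an md5 computation. */
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <linux/err.h>

static int checkpoint_md5(const u8 *part1, unsigned int len1,
			  const u8 *part2, unsigned int len2, u8 *digest)
{
	struct crypto_shash *tfm;
	struct md5_state saved;	/* export format is struct md5_state */
	struct {
		struct shash_desc shash;
		char __ctx[sizeof(struct md5_state)];	/* descsize for md5 */
	} desc;
	int err;

	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.shash.tfm = tfm;
	desc.shash.flags = 0;

	err = crypto_shash_init(&desc.shash);
	if (!err)
		err = crypto_shash_update(&desc.shash, part1, len1);
	if (!err)
		err = crypto_shash_export(&desc.shash, &saved);	/* checkpoint */
	if (!err)
		err = crypto_shash_import(&desc.shash, &saved);	/* resume */
	if (!err)
		err = crypto_shash_finup(&desc.shash, part2, len2, digest);

	crypto_free_shash(tfm);
	return err;
}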
crypto/pcrypt.c | 445 (new file)
@@ -0,0 +1,445 @@
/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <crypto/pcrypt.h>

static struct padata_instance *pcrypt_enc_padata;
static struct padata_instance *pcrypt_dec_padata;
static struct workqueue_struct *encwq;
static struct workqueue_struct *decwq;

struct pcrypt_instance_ctx {
	struct crypto_spawn spawn;
	unsigned int tfm_count;
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;
	unsigned int cb_cpu;
};

static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_instance *pinst)
{
	unsigned int cpu_index, cpu, i;

	cpu = *cb_cpu;

	if (cpumask_test_cpu(cpu, cpu_active_mask))
		goto out;

	cpu_index = cpu % cpumask_weight(cpu_active_mask);

	cpu = cpumask_first(cpu_active_mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpu_active_mask);

	*cb_cpu = cpu;

out:
	return padata_do_parallel(pinst, padata, cpu);
}

static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}

static void pcrypt_aead_giv_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->areq.base.data, padata->info);
}

static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	padata_do_serial(padata);
}

static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_encrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_encrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_assoc(creq, req->assoc, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
	if (err)
		return err;
	else
		err = crypto_aead_encrypt(creq);

	return err;
}

static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_decrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_assoc(creq, req->assoc, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata);
	if (err)
		return err;
	else
		err = crypto_aead_decrypt(creq);

	return err;
}

static void pcrypt_aead_givenc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_givencrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
{
	int err;
	struct aead_request *areq = &req->areq;
	struct pcrypt_request *preq = aead_request_ctx(areq);
	struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(areq);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_givenc;
	padata->serial = pcrypt_aead_giv_serial;

	aead_givcrypt_set_tfm(creq, ctx->child);
	aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				   pcrypt_aead_done, areq);
	aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
				areq->cryptlen, areq->iv);
	aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
	aead_givcrypt_set_giv(creq, req->giv, req->seq);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
	if (err)
		return err;
	else
		err = crypto_aead_givencrypt(creq);

	return err;
}

static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
{
	int cpu, cpu_index;
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	ictx->tfm_count++;

	cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);

	ctx->cb_cpu = cpumask_first(cpu_active_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);

	cipher = crypto_spawn_aead(crypto_instance_ctx(inst));

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
		+ sizeof(struct aead_givcrypt_request)
		+ crypto_aead_reqsize(cipher);

	return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
{
	struct crypto_instance *inst;
	struct pcrypt_instance_ctx *ctx;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst) {
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	ctx = crypto_instance_ctx(inst);
	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto out_free_inst;

	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}

static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);

	alg = crypto_get_attr_alg(tb, algt->type,
				  (algt->mask & CRYPTO_ALG_TYPE_MASK));
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = pcrypt_alloc_instance(alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_aead_type;

	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;

	inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.cra_init = pcrypt_aead_init_tfm;
	inst->alg.cra_exit = pcrypt_aead_exit_tfm;

	inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
	inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
	inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
	inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		return pcrypt_alloc_aead(tb);
	}

	return ERR_PTR(-EINVAL);
}

static void pcrypt_free(struct crypto_instance *inst)
{
	struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.alloc = pcrypt_alloc,
	.free = pcrypt_free,
	.module = THIS_MODULE,
};

static int __init pcrypt_init(void)
{
	encwq = create_workqueue("pencrypt");
	if (!encwq)
		goto err;

	decwq = create_workqueue("pdecrypt");
	if (!decwq)
		goto err_destroy_encwq;


	pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq);
	if (!pcrypt_enc_padata)
		goto err_destroy_decwq;

	pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq);
	if (!pcrypt_dec_padata)
		goto err_free_padata;

	padata_start(pcrypt_enc_padata);
	padata_start(pcrypt_dec_padata);

	return crypto_register_template(&pcrypt_tmpl);

err_free_padata:
	padata_free(pcrypt_enc_padata);

err_destroy_decwq:
	destroy_workqueue(decwq);

err_destroy_encwq:
	destroy_workqueue(encwq);

err:
	return -ENOMEM;
}

static void __exit pcrypt_exit(void)
{
	padata_stop(pcrypt_enc_padata);
	padata_stop(pcrypt_dec_padata);

	destroy_workqueue(encwq);
	destroy_workqueue(decwq);

	padata_free(pcrypt_enc_padata);
	padata_free(pcrypt_dec_padata);

	crypto_unregister_template(&pcrypt_tmpl);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
@@ -1477,9 +1477,54 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
 	return err;
 }

+static int alg_test_null(const struct alg_test_desc *desc,
+			 const char *driver, u32 type, u32 mask)
+{
+	return 0;
+}
+
 /* Please keep this list sorted by algorithm name. */
 static const struct alg_test_desc alg_test_descs[] = {
 	{
+		.alg = "__driver-cbc-aes-aesni",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
+	}, {
+		.alg = "__driver-ecb-aes-aesni",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
+	}, {
+		.alg = "__ghash-pclmulqdqni",
+		.test = alg_test_null,
+		.suite = {
+			.hash = {
+				.vecs = NULL,
+				.count = 0
+			}
+		}
+	}, {
 		.alg = "ansi_cprng",
 		.test = alg_test_cprng,
 		.fips_allowed = 1,
@@ -1622,6 +1667,30 @@ static const struct alg_test_desc alg_test_descs[] = {
 				.count = CRC32C_TEST_VECTORS
 			}
 		}
+	}, {
+		.alg = "cryptd(__driver-ecb-aes-aesni)",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
+	}, {
+		.alg = "cryptd(__ghash-pclmulqdqni)",
+		.test = alg_test_null,
+		.suite = {
+			.hash = {
+				.vecs = NULL,
+				.count = 0
+			}
+		}
 	}, {
 		.alg = "ctr(aes)",
 		.test = alg_test_skcipher,
@@ -1668,6 +1737,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 			}
 		}
 	}
+	}, {
+		.alg = "ecb(__aes-aesni)",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
 	}, {
 		.alg = "ecb(aes)",
 		.test = alg_test_skcipher,
@@ -186,3 +186,15 @@ config HW_RANDOM_MXC_RNGA
 	  module will be called mxc-rnga.

 	  If unsure, say Y.
+
+config HW_RANDOM_NOMADIK
+	tristate "ST-Ericsson Nomadik Random Number Generator support"
+	depends on HW_RANDOM && PLAT_NOMADIK
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nomadik-rng.
+
+	  If unsure, say Y.
@@ -18,3 +18,4 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
 obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
 obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
 obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
+obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
drivers/char/hw_random/nomadik-rng.c | 103 (new file)
@@ -0,0 +1,103 @@
/*
 * Nomadik RNG support
 *  Copyright 2009 Alessandro Rubini
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/hw_random.h>
#include <linux/io.h>

static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	void __iomem *base = (void __iomem *)rng->priv;

	/*
	 * The register is 32 bits and gives 16 random bits (low half).
	 * A subsequent read will delay the core for 400ns, so we just read
	 * once and accept the very unlikely very small delay, even if wait==0.
	 */
	*(u16 *)data = __raw_readl(base + 8) & 0xffff;
	return 2;
}

/* we have at most one RNG per machine, granted */
static struct hwrng nmk_rng = {
	.name		= "nomadik",
	.read		= nmk_rng_read,
};

static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
{
	void __iomem *base;
	int ret;

	ret = amba_request_regions(dev, dev->dev.init_name);
	if (ret)
		return ret;
	ret = -ENOMEM;
	base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!base)
		goto out_release;
	nmk_rng.priv = (unsigned long)base;
	ret = hwrng_register(&nmk_rng);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	iounmap(base);
out_release:
	amba_release_regions(dev);
	return ret;
}

static int nmk_rng_remove(struct amba_device *dev)
{
	void __iomem *base = (void __iomem *)nmk_rng.priv;
	hwrng_unregister(&nmk_rng);
	iounmap(base);
	amba_release_regions(dev);
	return 0;
}

static struct amba_id nmk_rng_ids[] = {
	{
		.id	= 0x000805e1,
		.mask	= 0x000fffff, /* top bits are rev and cfg: accept all */
	},
	{0, 0},
};

static struct amba_driver nmk_rng_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "rng",
	},
	.probe = nmk_rng_probe,
	.remove = nmk_rng_remove,
	.id_table = nmk_rng_ids,
};

static int __init nmk_rng_init(void)
{
	return amba_driver_register(&nmk_rng_driver);
}

static void __devexit nmk_rng_exit(void)
{
	amba_driver_unregister(&nmk_rng_driver);
}

module_init(nmk_rng_init);
module_exit(nmk_rng_exit);

MODULE_LICENSE("GPL");
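Once this driver registers with the hw_random core, the hardware is readable from userspace. A hedged sketch, not from the commit, assuming the usual hw_random character node /dev/hwrng (the exact node name can vary by setup):

/* Hedged userspace sketch: pull a few bytes from the hwrng node. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n, i;
	int fd = open("/dev/hwrng", O_RDONLY);	/* assumed node name */

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	/* the core gathers data in 16-bit chunks via nmk_rng_read() above */
	n = read(fd, buf, sizeof(buf));
	if (n > 0) {
		for (i = 0; i < n; i++)
			printf("%02x", buf[i]);
		putchar('\n');
	}
	close(fd);
	return n > 0 ? 0 : 1;
}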
@@ -1274,7 +1274,7 @@ static int __exit crypto4xx_remove(struct of_device *ofdev)
 	return 0;
 }

-static struct of_device_id crypto4xx_match[] = {
+static const struct of_device_id crypto4xx_match[] = {
 	{ .compatible      = "amcc,ppc4xx-crypto",},
 	{ },
 };
@@ -135,13 +135,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
 	/*
 	 * The requested key size is not supported by HW, do a fallback
 	 */
-	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+	op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

 	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
 	if (ret) {
 		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
+		tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
 	}
 	return ret;
 }
@@ -263,7 +263,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm)

 	if (IS_ERR(op->fallback.cip)) {
 		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
-		return PTR_ERR(op->fallback.blk);
+		return PTR_ERR(op->fallback.cip);
 	}

 	return 0;
@@ -1958,7 +1958,7 @@ err_out:
 	return err;
 }

-static struct of_device_id talitos_match[] = {
+static const struct of_device_id talitos_match[] = {
 	{
 		.compatible = "fsl,sec2.0",
 	},
include/crypto/md5.h | 17 (new file)
@@ -0,0 +1,17 @@
#ifndef _CRYPTO_MD5_H
#define _CRYPTO_MD5_H

#include <linux/types.h>

#define MD5_DIGEST_SIZE		16
#define MD5_HMAC_BLOCK_SIZE	64
#define MD5_BLOCK_WORDS		16
#define MD5_HASH_WORDS		4

struct md5_state {
	u32 hash[MD5_HASH_WORDS];
	u32 block[MD5_BLOCK_WORDS];
	u64 byte_count;
};

#endif
include/crypto/pcrypt.h | 51 (new file)
@@ -0,0 +1,51 @@
/*
 * pcrypt - Parallel crypto engine.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef _CRYPTO_PCRYPT_H
#define _CRYPTO_PCRYPT_H

#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/padata.h>

struct pcrypt_request {
	struct padata_priv	padata;
	void			*data;
	void			*__ctx[] CRYPTO_MINALIGN_ATTR;
};

static inline void *pcrypt_request_ctx(struct pcrypt_request *req)
{
	return req->__ctx;
}

static inline
struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req)
{
	return &req->padata;
}

static inline
struct pcrypt_request *pcrypt_padata_request(struct padata_priv *padata)
{
	return container_of(padata, struct pcrypt_request, padata);
}

#endif
include/linux/padata.h | 88 (new file)
@@ -0,0 +1,88 @@
/*
 * padata.h - header for the padata parallelization interface
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef PADATA_H
#define PADATA_H

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct padata_priv {
	struct list_head	list;
	struct parallel_data	*pd;
	int			cb_cpu;
	int			seq_nr;
	int			info;
	void			(*parallel)(struct padata_priv *padata);
	void			(*serial)(struct padata_priv *padata);
};

struct padata_list {
	struct list_head	list;
	spinlock_t		lock;
};

struct padata_queue {
	struct padata_list	parallel;
	struct padata_list	reorder;
	struct padata_list	serial;
	struct work_struct	pwork;
	struct work_struct	swork;
	struct parallel_data	*pd;
	atomic_t		num_obj;
	int			cpu_index;
};

struct parallel_data {
	struct padata_instance	*pinst;
	struct padata_queue	*queue;
	atomic_t		seq_nr;
	atomic_t		reorder_objects;
	atomic_t		refcnt;
	unsigned int		max_seq_nr;
	cpumask_var_t		cpumask;
	spinlock_t		lock;
};

struct padata_instance {
	struct notifier_block	cpu_notifier;
	struct workqueue_struct	*wq;
	struct parallel_data	*pd;
	cpumask_var_t		cpumask;
	struct mutex		lock;
	u8			flags;
#define	PADATA_INIT	1
#define	PADATA_RESET	2
};

extern struct padata_instance *padata_alloc(const struct cpumask *cpumask,
					    struct workqueue_struct *wq);
extern void padata_free(struct padata_instance *pinst);
extern int padata_do_parallel(struct padata_instance *pinst,
			      struct padata_priv *padata, int cb_cpu);
extern void padata_do_serial(struct padata_priv *padata);
extern int padata_set_cpumask(struct padata_instance *pinst,
			      cpumask_var_t cpumask);
extern int padata_add_cpu(struct padata_instance *pinst, int cpu);
extern int padata_remove_cpu(struct padata_instance *pinst, int cpu);
extern void padata_start(struct padata_instance *pinst);
extern void padata_stop(struct padata_instance *pinst);
#endif
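The header above is the whole client contract: embed a struct padata_priv, supply parallel/serial callbacks, and submit through padata_do_parallel. A hedged sketch of a minimal user, not from the commit, with names and payload purely illustrative; per padata.c below, a successful submission returns -EINPROGRESS, and every parallelized object must reach padata_do_serial:

/* Hedged sketch: a minimal padata client. */
#include <linux/kernel.h>
#include <linux/padata.h>
#include <linux/slab.h>

struct my_work {
	struct padata_priv padata;	/* embedded so container_of() works */
	/* ... per-object payload ... */
};

static void my_parallel(struct padata_priv *padata)
{
	/* Runs with BHs off on the CPU picked by the seq_nr hash. */
	/* ... heavy per-object work goes here ... */
	padata_do_serial(padata);	/* mandatory: feeds the reorder logic */
}

static void my_serial(struct padata_priv *padata)
{
	struct my_work *w = container_of(padata, struct my_work, padata);

	/* Objects arrive here in submission order, on the requested cb_cpu. */
	kfree(w);
}

static int my_submit(struct padata_instance *pinst, struct my_work *w,
		     int cb_cpu)
{
	w->padata.parallel = my_parallel;
	w->padata.serial = my_serial;
	/* Returns -EINPROGRESS once the object is queued. */
	return padata_do_parallel(pinst, &w->padata, cb_cpu);
}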
@@ -315,6 +315,7 @@ struct sadb_x_kmaddress {
 #define SADB_X_EALG_AES_GCM_ICV12	19
 #define SADB_X_EALG_AES_GCM_ICV16	20
 #define SADB_X_EALG_CAMELLIACBC	22
+#define SADB_X_EALG_NULL_AES_GMAC	23
 #define SADB_EALG_MAX			253 /* last EALG */
 /* private allocations should use 249-255 (RFC2407) */
 #define SADB_X_EALG_SERPENTCBC  252	/* draft-ietf-ipsec-ciph-aes-cbc-00 */
@@ -1262,4 +1262,8 @@ source "block/Kconfig"
 config PREEMPT_NOTIFIERS
 	bool

+config PADATA
+	depends on SMP
+	bool
+
 source "kernel/Kconfig.locks"
@@ -100,6 +100,7 @@ obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
+obj-$(CONFIG_PADATA) += padata.o

 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
690
kernel/padata.c
Normal file
690
kernel/padata.c
Normal file
@ -0,0 +1,690 @@
|
||||
/*
|
||||
* padata.c - generic interface to process data streams in parallel
|
||||
*
|
||||
* Copyright (C) 2008, 2009 secunet Security Networks AG
|
||||
* Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/padata.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
#define MAX_SEQ_NR INT_MAX - NR_CPUS
|
||||
#define MAX_OBJ_NUM 10000 * NR_CPUS
|
||||
|
||||
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
|
||||
{
|
||||
int cpu, target_cpu;
|
||||
|
||||
target_cpu = cpumask_first(pd->cpumask);
|
||||
for (cpu = 0; cpu < cpu_index; cpu++)
|
||||
target_cpu = cpumask_next(target_cpu, pd->cpumask);
|
||||
|
||||
return target_cpu;
|
||||
}
|
||||
|
||||
static int padata_cpu_hash(struct padata_priv *padata)
|
||||
{
|
||||
int cpu_index;
|
||||
struct parallel_data *pd;
|
||||
|
||||
pd = padata->pd;
|
||||
|
||||
/*
|
||||
* Hash the sequence numbers to the cpus by taking
|
||||
* seq_nr mod. number of cpus in use.
|
||||
*/
|
||||
cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask);
|
||||
|
||||
return padata_index_to_cpu(pd, cpu_index);
|
||||
}
|
||||
|
||||
static void padata_parallel_worker(struct work_struct *work)
|
||||
{
|
||||
struct padata_queue *queue;
|
||||
struct parallel_data *pd;
|
||||
struct padata_instance *pinst;
|
||||
LIST_HEAD(local_list);
|
||||
|
||||
local_bh_disable();
|
||||
queue = container_of(work, struct padata_queue, pwork);
|
||||
pd = queue->pd;
|
||||
pinst = pd->pinst;
|
||||
|
||||
spin_lock(&queue->parallel.lock);
|
||||
list_replace_init(&queue->parallel.list, &local_list);
|
||||
spin_unlock(&queue->parallel.lock);
|
||||
|
||||
while (!list_empty(&local_list)) {
|
||||
struct padata_priv *padata;
|
||||
|
||||
padata = list_entry(local_list.next,
|
||||
struct padata_priv, list);
|
||||
|
||||
list_del_init(&padata->list);
|
||||
|
||||
padata->parallel(padata);
|
||||
}
|
||||
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
/*
|
||||
* padata_do_parallel - padata parallelization function
|
||||
*
|
||||
* @pinst: padata instance
|
||||
* @padata: object to be parallelized
|
||||
* @cb_cpu: cpu the serialization callback function will run on,
|
||||
* must be in the cpumask of padata.
|
||||
*
|
||||
* The parallelization callback function will run with BHs off.
|
||||
* Note: Every object which is parallelized by padata_do_parallel
|
||||
* must be seen by padata_do_serial.
|
||||
*/
|
||||
int padata_do_parallel(struct padata_instance *pinst,
|
||||
struct padata_priv *padata, int cb_cpu)
|
||||
{
|
||||
int target_cpu, err;
|
||||
struct padata_queue *queue;
|
||||
struct parallel_data *pd;
|
||||
|
||||
rcu_read_lock_bh();
|
||||
|
||||
pd = rcu_dereference(pinst->pd);
|
||||
|
||||
err = 0;
|
||||
if (!(pinst->flags & PADATA_INIT))
|
||||
goto out;
|
||||
|
||||
err = -EBUSY;
|
||||
if ((pinst->flags & PADATA_RESET))
|
||||
goto out;
|
||||
|
||||
if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
|
||||
goto out;
|
||||
|
||||
err = -EINVAL;
|
||||
if (!cpumask_test_cpu(cb_cpu, pd->cpumask))
|
||||
goto out;
|
||||
|
||||
err = -EINPROGRESS;
|
||||
atomic_inc(&pd->refcnt);
|
||||
padata->pd = pd;
|
||||
padata->cb_cpu = cb_cpu;
|
||||
|
||||
if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
|
||||
atomic_set(&pd->seq_nr, -1);
|
||||
|
||||
padata->seq_nr = atomic_inc_return(&pd->seq_nr);
|
||||
|
||||
target_cpu = padata_cpu_hash(padata);
|
||||
queue = per_cpu_ptr(pd->queue, target_cpu);
|
||||
|
||||
spin_lock(&queue->parallel.lock);
|
||||
list_add_tail(&padata->list, &queue->parallel.list);
|
||||
spin_unlock(&queue->parallel.lock);
|
||||
|
||||
queue_work_on(target_cpu, pinst->wq, &queue->pwork);
|
||||
|
||||
out:
|
||||
rcu_read_unlock_bh();
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(padata_do_parallel);
|
||||
|
||||
static struct padata_priv *padata_get_next(struct parallel_data *pd)
|
||||
{
|
||||
int cpu, num_cpus, empty, calc_seq_nr;
|
||||
int seq_nr, next_nr, overrun, next_overrun;
|
||||
struct padata_queue *queue, *next_queue;
|
||||
struct padata_priv *padata;
|
||||
struct padata_list *reorder;
|
||||
|
||||
empty = 0;
|
||||
next_nr = -1;
|
||||
next_overrun = 0;
|
||||
next_queue = NULL;
|
||||
|
||||
num_cpus = cpumask_weight(pd->cpumask);
|
||||
|
||||
for_each_cpu(cpu, pd->cpumask) {
|
||||
queue = per_cpu_ptr(pd->queue, cpu);
|
||||
reorder = &queue->reorder;
|
||||
|
||||
/*
|
||||
* Calculate the seq_nr of the object that should be
|
||||
* next in this queue.
|
||||
*/
|
||||
overrun = 0;
|
||||
calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus)
|
||||
+ queue->cpu_index;
|
||||
|
||||
if (unlikely(calc_seq_nr > pd->max_seq_nr)) {
|
||||
calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1;
|
||||
overrun = 1;
|
||||
}
|
||||
|
||||
if (!list_empty(&reorder->list)) {
|
||||
padata = list_entry(reorder->list.next,
|
||||
struct padata_priv, list);
|
||||
|
||||
seq_nr = padata->seq_nr;
|
||||
BUG_ON(calc_seq_nr != seq_nr);
|
||||
} else {
|
||||
seq_nr = calc_seq_nr;
|
||||
empty++;
|
||||
}
|
||||
|
||||
if (next_nr < 0 || seq_nr < next_nr
|
||||
|| (next_overrun && !overrun)) {
|
||||
next_nr = seq_nr;
|
||||
next_overrun = overrun;
|
||||
next_queue = queue;
|
||||
}
|
||||
}
|
||||
|
||||
padata = NULL;
|
||||
|
||||
if (empty == num_cpus)
|
||||
goto out;
|
||||
|
||||
reorder = &next_queue->reorder;
|
||||
|
||||
if (!list_empty(&reorder->list)) {
|
||||
padata = list_entry(reorder->list.next,
|
||||
struct padata_priv, list);
|
||||
|
||||
if (unlikely(next_overrun)) {
|
||||
for_each_cpu(cpu, pd->cpumask) {
|
||||
queue = per_cpu_ptr(pd->queue, cpu);
|
||||
atomic_set(&queue->num_obj, 0);
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock(&reorder->lock);
|
||||
list_del_init(&padata->list);
|
||||
atomic_dec(&pd->reorder_objects);
|
||||
spin_unlock(&reorder->lock);
|
||||
|
||||
atomic_inc(&next_queue->num_obj);
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (next_nr % num_cpus == next_queue->cpu_index) {
|
||||
padata = ERR_PTR(-ENODATA);
|
||||
goto out;
|
||||
}
|
||||
|
||||
padata = ERR_PTR(-EINPROGRESS);
|
||||
out:
|
||||
return padata;
|
||||
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_priv *padata;
	struct padata_queue *queue;
	struct padata_instance *pinst = pd->pinst;

try_again:
	if (!spin_trylock_bh(&pd->lock))
		goto out;

	while (1) {
		padata = padata_get_next(pd);

		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		if (PTR_ERR(padata) == -ENODATA) {
			spin_unlock_bh(&pd->lock);
			goto out;
		}

		queue = per_cpu_ptr(pd->queue, padata->cb_cpu);

		spin_lock(&queue->serial.lock);
		list_add_tail(&padata->list, &queue->serial.list);
		spin_unlock(&queue->serial.lock);

		queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork);
	}

	spin_unlock_bh(&pd->lock);

	if (atomic_read(&pd->reorder_objects))
		goto try_again;

out:
	return;
}
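
The trylock plus the reorder_objects re-check implements a lock-handoff pattern: whichever context wins pd->lock drains the reorder queues on behalf of everyone, a loser simply leaves, and the winner re-checks after unlocking so an object queued in that window is not stranded. A stripped-down user-space illustration of the same idea (POSIX threads and C11 atomics; purely illustrative, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int pending;

static void process_all(void)
{
	int n = atomic_exchange(&pending, 0);

	if (n)
		printf("drained %d objects\n", n);
}

void enqueue_and_kick(void)
{
	atomic_fetch_add(&pending, 1);

	while (pthread_mutex_trylock(&lock) == 0) {
		process_all();
		pthread_mutex_unlock(&lock);

		/* work queued while we held the lock? drain again */
		if (!atomic_load(&pending))
			return;
	}
	/* trylock failed: the current holder's re-check sees our object */
}
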

static void padata_serial_worker(struct work_struct *work)
{
	struct padata_queue *queue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	queue = container_of(work, struct padata_queue, swork);
	pd = queue->pd;

	spin_lock(&queue->serial.lock);
	list_replace_init(&queue->serial.list, &local_list);
	spin_unlock(&queue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/*
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_queue *queue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	queue = per_cpu_ptr(pd->queue, cpu);

	spin_lock(&queue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &queue->reorder.list);
	spin_unlock(&queue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
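
Completion side of the earlier submission sketch; the callback names are hypothetical. The parallel callback runs on whatever CPU the sequence number hashed to and must end by handing the object to padata_do_serial(); the serial callback then runs on the requested cb_cpu, in submission order, with BHs off.

#include <linux/padata.h>

static void my_parallel(struct padata_priv *padata)
{
	/* ... the expensive, parallelizable work ... */

	padata_do_serial(padata);	/* mandatory for every object */
}

static void my_serial(struct padata_priv *padata)
{
	/* in-order post-processing; runs on cb_cpu with BHs off */
}
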

static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *cpumask)
{
	int cpu, cpu_index, num_cpus;
	struct padata_queue *queue;
	struct parallel_data *pd;

	cpu_index = 0;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->queue = alloc_percpu(struct padata_queue);
	if (!pd->queue)
		goto err_free_pd;

	if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
		goto err_free_queue;

	for_each_possible_cpu(cpu) {
		queue = per_cpu_ptr(pd->queue, cpu);

		queue->pd = pd;

		if (cpumask_test_cpu(cpu, cpumask)
		    && cpumask_test_cpu(cpu, cpu_active_mask)) {
			queue->cpu_index = cpu_index;
			cpu_index++;
		} else {
			queue->cpu_index = -1;
		}

		INIT_LIST_HEAD(&queue->reorder.list);
		INIT_LIST_HEAD(&queue->parallel.list);
		INIT_LIST_HEAD(&queue->serial.list);
		spin_lock_init(&queue->reorder.lock);
		spin_lock_init(&queue->parallel.lock);
		spin_lock_init(&queue->serial.lock);

		INIT_WORK(&queue->pwork, padata_parallel_worker);
		INIT_WORK(&queue->swork, padata_serial_worker);
		atomic_set(&queue->num_obj, 0);
	}

	cpumask_and(pd->cpumask, cpumask, cpu_active_mask);

	num_cpus = cpumask_weight(pd->cpumask);
	pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;

	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_queue:
	free_percpu(pd->queue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask);
	free_percpu(pd->queue);
	kfree(pd);
}

static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	while (atomic_read(&pd_old->refcnt) != 0)
		yield();

	flush_workqueue(pinst->wq);

	padata_free_pd(pd_old);

	pinst->flags &= ~PADATA_RESET;
}

/*
 * padata_set_cpumask - set the cpumask that padata should use
 *
 * @pinst: padata instance
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst,
		       cpumask_var_t cpumask)
{
	struct parallel_data *pd;
	int err = 0;

	might_sleep();

	mutex_lock(&pinst->lock);

	pd = padata_alloc_pd(pinst, cpumask);
	if (!pd) {
		err = -ENOMEM;
		goto out;
	}

	cpumask_copy(pinst->cpumask, cpumask);

	padata_replace(pinst, pd);

out:
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
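
A small caller sketch: restricting an instance to CPUs 0-3. The instance pointer is assumed to come from padata_alloc() (see the lifecycle sketch further down); the helper name is hypothetical.

#include <linux/padata.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

static int use_first_four_cpus(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);
	cpumask_set_cpu(2, mask);
	cpumask_set_cpu(3, mask);

	err = padata_set_cpumask(pinst, mask);	/* may sleep */

	free_cpumask_var(mask);
	return err;
}
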

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);
	}

	return 0;
}

/*
 * padata_add_cpu - add a cpu to the padata cpumask
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 */
int padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err;

	might_sleep();

	mutex_lock(&pinst->lock);

	cpumask_set_cpu(cpu, pinst->cpumask);
	err = __padata_add_cpu(pinst, cpu);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_add_cpu);

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);
	}

	return 0;
}

/*
 * padata_remove_cpu - remove a cpu from the padata cpumask
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err;

	might_sleep();

	mutex_lock(&pinst->lock);

	cpumask_clear_cpu(cpu, pinst->cpumask);
	err = __padata_remove_cpu(pinst, cpu);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
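
Both helpers may sleep, so adjustments like the hypothetical one below must run in process context; each call rebuilds and swaps the parallel_data, so batching changes through padata_set_cpumask() is cheaper when several CPUs move at once.

static int my_swap_cpu(struct padata_instance *pinst, int out, int in)
{
	int err;

	err = padata_remove_cpu(pinst, out);
	if (err)
		return err;

	return padata_add_cpu(pinst, in);
}
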

/*
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
void padata_start(struct padata_instance *pinst)
{
	might_sleep();

	mutex_lock(&pinst->lock);
	pinst->flags |= PADATA_INIT;
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_start);

/*
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	might_sleep();

	mutex_lock(&pinst->lock);
	pinst->flags &= ~PADATA_INIT;
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return NOTIFY_BAD;
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return NOTIFY_BAD;
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		__padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		break;

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		__padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
	}

	return NOTIFY_OK;
}

/*
 * padata_alloc - allocate and initialize a padata instance
 *
 * @cpumask: cpumask that padata uses for parallelization
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc(const struct cpumask *cpumask,
				     struct workqueue_struct *wq)
{
	int err;
	struct padata_instance *pinst;
	struct parallel_data *pd;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pd = padata_alloc_pd(pinst, cpumask);
	if (!pd)
		goto err_free_inst;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask, cpumask);

	pinst->flags = 0;

	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	err = register_hotcpu_notifier(&pinst->cpu_notifier);
	if (err)
		goto err_free_pd;

	mutex_init(&pinst->lock);

	return pinst;

err_free_pd:
	padata_free_pd(pd);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);
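
Putting the pieces together, a sketch of an instance's whole lifetime. "my_wq", "my_init" and "my_exit" are hypothetical; the padata calls and their ordering follow the functions in this file (padata_free() stops the instance and waits for in-flight objects before tearing it down).

#include <linux/module.h>
#include <linux/padata.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct padata_instance *my_pinst;

static int __init my_init(void)
{
	my_wq = create_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;

	my_pinst = padata_alloc(cpu_possible_mask, my_wq);
	if (!my_pinst) {
		destroy_workqueue(my_wq);
		return -ENOMEM;
	}

	padata_start(my_pinst);	/* sets PADATA_INIT; submissions accepted */
	return 0;
}

static void __exit my_exit(void)
{
	padata_free(my_pinst);	/* stops, drains, then frees the instance */
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
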

/*
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	padata_stop(pinst);

	synchronize_rcu();

	while (atomic_read(&pinst->pd->refcnt) != 0)
		yield();

	unregister_hotcpu_notifier(&pinst->cpu_notifier);
	padata_free_pd(pinst->pd);
	kfree(pinst);
}
EXPORT_SYMBOL(padata_free);

@ -125,6 +125,22 @@ static struct xfrm_algo_desc aead_list[] = {
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "rfc4543(gcm(aes))",

	.uinfo = {
		.aead = {
			.icv_truncbits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};

static struct xfrm_algo_desc aalg_list[] = {