Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Add support for allocating transforms on a specific NUMA Node
   - Introduce the flag CRYPTO_ALG_ALLOCATES_MEMORY for storage users

  Algorithms:
   - Drop PMULL based ghash on arm64
   - Fixes for building with clang on x86
   - Add sha256 helper that does the digest in one go
   - Add SP800-56A rev 3 validation checks to dh

  Drivers:
   - Permit users to specify NUMA node in hisilicon/zip
   - Add support for i.MX6 in imx-rngc
   - Add sa2ul crypto driver
   - Add BA431 hwrng driver
   - Add Ingenic JZ4780 and X1000 hwrng driver
   - Spread IRQ affinity in inside-secure and marvell/cesa"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (157 commits)
  crypto: sa2ul - Fix inconsistent IS_ERR and PTR_ERR
  hwrng: core - remove redundant initialization of variable ret
  crypto: x86/curve25519 - Remove unused carry variables
  crypto: ingenic - Add hardware RNG for Ingenic JZ4780 and X1000
  dt-bindings: RNG: Add Ingenic RNG bindings.
  crypto: caam/qi2 - add module alias
  crypto: caam - add more RNG hw error codes
  crypto: caam/jr - remove incorrect reference to caam_jr_register()
  crypto: caam - silence .setkey in case of bad key length
  crypto: caam/qi2 - create ahash shared descriptors only once
  crypto: caam/qi2 - fix error reporting for caam_hash_alloc
  crypto: caam - remove deadcode on 32-bit platforms
  crypto: ccp - use generic power management
  crypto: xts - Replace memcpy() invocation with simple assignment
  crypto: marvell/cesa - irq balance
  crypto: inside-secure - irq balance
  crypto: ecc - SP800-56A rev 3 local public key validation
  crypto: dh - SP800-56A rev 3 local public key validation
  crypto: dh - check validity of Z before export
  lib/mpi: Add mpi_sub_ui()
  ...
commit ab5c60b79a
@@ -27,22 +27,11 @@ padata_instance structure for overall control of how jobs are to be run::

     #include <linux/padata.h>

-    struct padata_instance *padata_alloc_possible(const char *name);
+    struct padata_instance *padata_alloc(const char *name);

 'name' simply identifies the instance.

-There are functions for enabling and disabling the instance::
-
-    int padata_start(struct padata_instance *pinst);
-    void padata_stop(struct padata_instance *pinst);
-
-These functions are setting or clearing the "PADATA_INIT" flag; if that flag is
-not set, other functions will refuse to work.  padata_start() returns zero on
-success (flag set) or -EINVAL if the padata cpumask contains no active CPU
-(flag not set).  padata_stop() clears the flag and blocks until the padata
-instance is unused.
-
-Finally, complete padata initialization by allocating a padata_shell::
+Then, complete padata initialization by allocating a padata_shell::

     struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);

@@ -155,11 +144,10 @@ submitted.

 Destroying
 ----------

-Cleaning up a padata instance predictably involves calling the three free
+Cleaning up a padata instance predictably involves calling the two free
 functions that correspond to the allocation in reverse::

     void padata_free_shell(struct padata_shell *ps);
-    void padata_stop(struct padata_instance *pinst);
     void padata_free(struct padata_instance *pinst);

 It is the user's responsibility to ensure all outstanding jobs are complete
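The two hunks above track this merge's padata cleanup: padata_alloc_possible() is folded into padata_alloc(), and padata_start()/padata_stop() drop out of the lifecycle. A minimal sketch of the resulting allocate/use/free order follows; the workqueue name and the wrapper functions are illustrative only, not code from the tree:

    #include <linux/errno.h>
    #include <linux/padata.h>

    static struct padata_instance *pinst;
    static struct padata_shell *ps;

    static int my_padata_setup(void)
    {
            pinst = padata_alloc("my_wq");          /* padata_alloc_possible() is gone */
            if (!pinst)
                    return -ENOMEM;

            ps = padata_alloc_shell(pinst);
            if (!ps) {
                    padata_free(pinst);
                    return -ENOMEM;
            }
            return 0;       /* padata_do_parallel() may be used from here on */
    }

    static void my_padata_teardown(void)
    {
            /* free in reverse order of allocation; no padata_stop() step any more */
            padata_free_shell(ps);
            padata_free(pinst);
    }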
@@ -169,7 +169,7 @@ Portions of this API were derived from the following projects:

   and;

-  Nettle (http://www.lysator.liu.se/~nisse/nettle/)
+  Nettle (https://www.lysator.liu.se/~nisse/nettle/)
    Niels Möller

 Original developers of the crypto algorithms:
@@ -23,7 +23,7 @@ user space, however. This includes the difference between synchronous
 and asynchronous invocations. The user space API call is fully
 synchronous.

-[1] http://www.chronox.de/libkcapi.html
+[1] https://www.chronox.de/libkcapi.html

 User Space API General Remarks
 ------------------------------

@@ -384,4 +384,4 @@ Please see [1] for libkcapi which provides an easy-to-use wrapper around
 the aforementioned Netlink kernel interface. [1] also contains a test
 application that invokes all libkcapi API calls.

-[1] http://www.chronox.de/libkcapi.html
+[1] https://www.chronox.de/libkcapi.html
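The userspace-if document touched above describes the crypto user-space API that libkcapi wraps (AF_ALG sockets). For orientation, a bare-bones user-space SHA-256 digest over that interface looks roughly like the sketch below; error handling is trimmed, and the "hash"/"sha256" strings are the standard AF_ALG selectors:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "hash",
                    .salg_name   = "sha256",
            };
            unsigned char digest[32];
            const char msg[] = "hello";
            int tfmfd, opfd;

            tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
            bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
            opfd = accept(tfmfd, NULL, 0);          /* one operation instance */

            write(opfd, msg, strlen(msg));          /* feed the data */
            read(opfd, digest, sizeof(digest));     /* read back the digest */

            for (size_t i = 0; i < sizeof(digest); i++)
                    printf("%02x", digest[i]);
            printf("\n");

            close(opfd);
            close(tfmfd);
            return 0;
    }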
Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml (new file, 76 lines)
@@ -0,0 +1,76 @@
# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/crypto/ti,sa2ul.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: K3 SoC SA2UL crypto module

maintainers:
  - Tero Kristo <t-kristo@ti.com>

properties:
  compatible:
    enum:
      - ti,j721e-sa2ul
      - ti,am654-sa2ul

  reg:
    maxItems: 1

  power-domains:
    maxItems: 1

  dmas:
    items:
      - description: TX DMA Channel
      - description: RX DMA Channel #1
      - description: RX DMA Channel #2

  dma-names:
    items:
      - const: tx
      - const: rx1
      - const: rx2

  dma-coherent: true

  "#address-cells":
    const: 2

  "#size-cells":
    const: 2

  ranges:
    description:
      Address translation for the possible RNG child node for SA2UL

patternProperties:
  "^rng@[a-f0-9]+$":
    type: object
    description:
      Child RNG node for SA2UL

required:
  - compatible
  - reg
  - power-domains
  - dmas
  - dma-names
  - dma-coherent

additionalProperties: false
examples:
  - |
    #include <dt-bindings/soc/ti,sci_pm_domain.h>

    main_crypto: crypto@4e00000 {
        compatible = "ti,j721e-sa2ul";
        reg = <0x0 0x4e00000 0x0 0x1200>;
        power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>;
        dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>,
               <&main_udmap 0x4001>;
        dma-names = "tx", "rx1", "rx2";
        dma-coherent;
    };
@@ -5,6 +5,9 @@ Required properties:
               "fsl,imx21-rnga"
               "fsl,imx31-rnga" (backward compatible with "fsl,imx21-rnga")
               "fsl,imx25-rngb"
+              "fsl,imx6sl-rngb" (backward compatible with "fsl,imx25-rngb")
+              "fsl,imx6sll-rngb" (backward compatible with "fsl,imx25-rngb")
+              "fsl,imx6ull-rngb" (backward compatible with "fsl,imx25-rngb")
               "fsl,imx35-rngc"
 - reg : offset and length of the register set of this block
 - interrupts : the interrupt number for the RNG block
Documentation/devicetree/bindings/rng/ingenic,rng.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/rng/ingenic,rng.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Bindings for RNG in Ingenic SoCs

maintainers:
  - 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>

description:
  The Random Number Generator in Ingenic SoCs.

properties:
  compatible:
    enum:
      - ingenic,jz4780-rng
      - ingenic,x1000-rng

  reg:
    maxItems: 1

required:
  - compatible
  - reg

additionalProperties: false

examples:
  - |
    rng: rng@d8 {
        compatible = "ingenic,jz4780-rng";
        reg = <0xd8 0x8>;
    };
...
@@ -0,0 +1,36 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/rng/silex-insight,ba431-rng.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Silex Insight BA431 RNG bindings

description: |
  The BA431 hardware random number generator is an IP that is FIPS-140-2/3
  certified.

maintainers:
  - Olivier Sobrie <olivier.sobrie@silexinsight.com>

properties:
  compatible:
    const: silex-insight,ba431-rng

  reg:
    maxItems: 1

required:
  - compatible
  - reg

additionalProperties: false

examples:
  - |
    rng@42800000 {
        compatible = "silex-insight,ba431-rng";
        reg = <0x42800000 0x1000>;
    };

...
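Both new bindings above describe simple memory-mapped TRNG blocks whose drivers register with the kernel's hwrng core. For orientation only, a skeletal hwrng platform driver is sketched below; the register layout, the single data register at offset 0, and all names are hypothetical and are not taken from the Ingenic or BA431 drivers:

    #include <linux/hw_random.h>
    #include <linux/io.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/string.h>

    struct demo_rng {
            void __iomem *base;
            struct hwrng rng;
    };

    static int demo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
            struct demo_rng *priv = container_of(rng, struct demo_rng, rng);
            u32 word = readl(priv->base);   /* hypothetical data register at offset 0 */

            memcpy(buf, &word, min(max, sizeof(word)));
            return min(max, sizeof(word));
    }

    static int demo_rng_probe(struct platform_device *pdev)
    {
            struct demo_rng *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            priv->base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(priv->base))
                    return PTR_ERR(priv->base);

            priv->rng.name = "demo-rng";
            priv->rng.read = demo_rng_read;

            /* platform_driver/of_device_id boilerplate omitted from this sketch */
            return devm_hwrng_register(&pdev->dev, &priv->rng);
    }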
@@ -830,11 +830,20 @@ F:	include/uapi/rdma/efa-abi.h

 AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
 M:	Tom Lendacky <thomas.lendacky@amd.com>
+M:	John Allen <john.allen@amd.com>
 L:	linux-crypto@vger.kernel.org
 S:	Supported
 F:	drivers/crypto/ccp/
 F:	include/linux/ccp.h

+AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER - SEV SUPPORT
+M:	Brijesh Singh <brijesh.singh@amd.com>
+M:	Tom Lendacky <thomas.lendacky@amd.com>
+L:	linux-crypto@vger.kernel.org
+S:	Supported
+F:	drivers/crypto/ccp/sev*
+F:	include/uapi/linux/psp-sev.h
+
 AMD DISPLAY CORE
 M:	Harry Wentland <harry.wentland@amd.com>
 M:	Leo Li <sunpeng.li@amd.com>
@@ -39,7 +39,7 @@
  * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
  * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
  * at:
- * http://www.intel.com/products/processor/manuals/
+ * https://www.intel.com/products/processor/manuals/
  * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  * Volume 2B: Instruction Set Reference, N-Z
  *
@ -16,6 +16,7 @@
|
||||
#include <crypto/gf128mul.h>
|
||||
#include <linux/cpufeature.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions");
|
||||
@ -27,12 +28,8 @@ MODULE_ALIAS_CRYPTO("ghash");
|
||||
#define GHASH_DIGEST_SIZE 16
|
||||
|
||||
struct ghash_key {
|
||||
u64 h[2];
|
||||
u64 h2[2];
|
||||
u64 h3[2];
|
||||
u64 h4[2];
|
||||
|
||||
be128 k;
|
||||
u64 h[][2];
|
||||
};
|
||||
|
||||
struct ghash_desc_ctx {
|
||||
@ -46,16 +43,12 @@ struct ghash_async_ctx {
|
||||
};
|
||||
|
||||
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
|
||||
struct ghash_key const *k,
|
||||
const char *head);
|
||||
u64 const h[][2], const char *head);
|
||||
|
||||
asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
|
||||
struct ghash_key const *k,
|
||||
const char *head);
|
||||
u64 const h[][2], const char *head);
|
||||
|
||||
static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src,
|
||||
struct ghash_key const *k,
|
||||
const char *head);
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_p64);
|
||||
|
||||
static int ghash_init(struct shash_desc *desc)
|
||||
{
|
||||
@ -70,7 +63,10 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
|
||||
{
|
||||
if (likely(crypto_simd_usable())) {
|
||||
kernel_neon_begin();
|
||||
pmull_ghash_update(blocks, dg, src, key, head);
|
||||
if (static_branch_likely(&use_p64))
|
||||
pmull_ghash_update_p64(blocks, dg, src, key->h, head);
|
||||
else
|
||||
pmull_ghash_update_p8(blocks, dg, src, key->h, head);
|
||||
kernel_neon_end();
|
||||
} else {
|
||||
be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
|
||||
@ -161,25 +157,26 @@ static int ghash_setkey(struct crypto_shash *tfm,
|
||||
const u8 *inkey, unsigned int keylen)
|
||||
{
|
||||
struct ghash_key *key = crypto_shash_ctx(tfm);
|
||||
be128 h;
|
||||
|
||||
if (keylen != GHASH_BLOCK_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
/* needed for the fallback */
|
||||
memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
|
||||
ghash_reflect(key->h, &key->k);
|
||||
ghash_reflect(key->h[0], &key->k);
|
||||
|
||||
h = key->k;
|
||||
gf128mul_lle(&h, &key->k);
|
||||
ghash_reflect(key->h2, &h);
|
||||
if (static_branch_likely(&use_p64)) {
|
||||
be128 h = key->k;
|
||||
|
||||
gf128mul_lle(&h, &key->k);
|
||||
ghash_reflect(key->h3, &h);
|
||||
gf128mul_lle(&h, &key->k);
|
||||
ghash_reflect(key->h[1], &h);
|
||||
|
||||
gf128mul_lle(&h, &key->k);
|
||||
ghash_reflect(key->h4, &h);
|
||||
gf128mul_lle(&h, &key->k);
|
||||
ghash_reflect(key->h[2], &h);
|
||||
|
||||
gf128mul_lle(&h, &key->k);
|
||||
ghash_reflect(key->h[3], &h);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -195,7 +192,7 @@ static struct shash_alg ghash_alg = {
|
||||
.base.cra_driver_name = "ghash-ce-sync",
|
||||
.base.cra_priority = 300 - 1,
|
||||
.base.cra_blocksize = GHASH_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct ghash_key),
|
||||
.base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
};
|
||||
|
||||
@ -354,10 +351,10 @@ static int __init ghash_ce_mod_init(void)
|
||||
if (!(elf_hwcap & HWCAP_NEON))
|
||||
return -ENODEV;
|
||||
|
||||
if (elf_hwcap2 & HWCAP2_PMULL)
|
||||
pmull_ghash_update = pmull_ghash_update_p64;
|
||||
else
|
||||
pmull_ghash_update = pmull_ghash_update_p8;
|
||||
if (elf_hwcap2 & HWCAP2_PMULL) {
|
||||
ghash_alg.base.cra_ctxsize += 3 * sizeof(u64[2]);
|
||||
static_branch_enable(&use_p64);
|
||||
}
|
||||
|
||||
err = crypto_register_shash(&ghash_alg);
|
||||
if (err)
|
||||
|
@ -13,7 +13,7 @@
|
||||
@ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
|
||||
@ project. The module is, however, dual licensed under OpenSSL and
|
||||
@ CRYPTOGAMS licenses depending on where you obtain it. For further
|
||||
@ details see http://www.openssl.org/~appro/cryptogams/.
|
||||
@ details see https://www.openssl.org/~appro/cryptogams/.
|
||||
@ ====================================================================
|
||||
|
||||
@ sha1_block procedure for ARMv4.
|
||||
|
@ -13,7 +13,7 @@
|
||||
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
|
||||
# project. The module is, however, dual licensed under OpenSSL and
|
||||
# CRYPTOGAMS licenses depending on where you obtain it. For further
|
||||
# details see http://www.openssl.org/~appro/cryptogams/.
|
||||
# details see https://www.openssl.org/~appro/cryptogams/.
|
||||
# ====================================================================
|
||||
|
||||
# SHA256 block procedure for ARMv4. May 2007.
|
||||
|
@ -12,7 +12,7 @@
|
||||
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
|
||||
@ project. The module is, however, dual licensed under OpenSSL and
|
||||
@ CRYPTOGAMS licenses depending on where you obtain it. For further
|
||||
@ details see http://www.openssl.org/~appro/cryptogams/.
|
||||
@ details see https://www.openssl.org/~appro/cryptogams/.
|
||||
@ ====================================================================
|
||||
|
||||
@ SHA256 block procedure for ARMv4. May 2007.
|
||||
|
@ -13,7 +13,7 @@
|
||||
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
|
||||
# project. The module is, however, dual licensed under OpenSSL and
|
||||
# CRYPTOGAMS licenses depending on where you obtain it. For further
|
||||
# details see http://www.openssl.org/~appro/cryptogams/.
|
||||
# details see https://www.openssl.org/~appro/cryptogams/.
|
||||
# ====================================================================
|
||||
|
||||
# SHA512 block procedure for ARMv4. September 2007.
|
||||
@ -43,7 +43,7 @@
|
||||
# terms it's 22.6 cycles per byte, which is disappointing result.
|
||||
# Technical writers asserted that 3-way S4 pipeline can sustain
|
||||
# multiple NEON instructions per cycle, but dual NEON issue could
|
||||
# not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
|
||||
# not be observed, see https://www.openssl.org/~appro/Snapdragon-S4.html
|
||||
# for further details. On side note Cortex-A15 processes one byte in
|
||||
# 16 cycles.
|
||||
|
||||
|
@ -12,7 +12,7 @@
|
||||
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
|
||||
@ project. The module is, however, dual licensed under OpenSSL and
|
||||
@ CRYPTOGAMS licenses depending on where you obtain it. For further
|
||||
@ details see http://www.openssl.org/~appro/cryptogams/.
|
||||
@ details see https://www.openssl.org/~appro/cryptogams/.
|
||||
@ ====================================================================
|
||||
|
||||
@ SHA512 block procedure for ARMv4. September 2007.
|
||||
@ -42,7 +42,7 @@
|
||||
@ terms it's 22.6 cycles per byte, which is disappointing result.
|
||||
@ Technical writers asserted that 3-way S4 pipeline can sustain
|
||||
@ multiple NEON instructions per cycle, but dual NEON issue could
|
||||
@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
|
||||
@ not be observed, see https://www.openssl.org/~appro/Snapdragon-S4.html
|
||||
@ for further details. On side note Cortex-A15 processes one byte in
|
||||
@ 16 cycles.
|
||||
|
||||
|
@ -31,12 +31,8 @@ MODULE_ALIAS_CRYPTO("ghash");
|
||||
#define GCM_IV_SIZE 12
|
||||
|
||||
struct ghash_key {
|
||||
u64 h[2];
|
||||
u64 h2[2];
|
||||
u64 h3[2];
|
||||
u64 h4[2];
|
||||
|
||||
be128 k;
|
||||
u64 h[][2];
|
||||
};
|
||||
|
||||
struct ghash_desc_ctx {
|
||||
@ -51,22 +47,18 @@ struct gcm_aes_ctx {
|
||||
};
|
||||
|
||||
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
|
||||
struct ghash_key const *k,
|
||||
const char *head);
|
||||
u64 const h[][2], const char *head);
|
||||
|
||||
asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
|
||||
struct ghash_key const *k,
|
||||
const char *head);
|
||||
u64 const h[][2], const char *head);
|
||||
|
||||
asmlinkage void pmull_gcm_encrypt(int bytes, u8 dst[], const u8 src[],
|
||||
struct ghash_key const *k, u64 dg[],
|
||||
u8 ctr[], u32 const rk[], int rounds,
|
||||
u8 tag[]);
|
||||
u64 const h[][2], u64 dg[], u8 ctr[],
|
||||
u32 const rk[], int rounds, u8 tag[]);
|
||||
|
||||
asmlinkage void pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
|
||||
struct ghash_key const *k, u64 dg[],
|
||||
u8 ctr[], u32 const rk[], int rounds,
|
||||
u8 tag[]);
|
||||
u64 const h[][2], u64 dg[], u8 ctr[],
|
||||
u32 const rk[], int rounds, u8 tag[]);
|
||||
|
||||
static int ghash_init(struct shash_desc *desc)
|
||||
{
|
||||
@ -77,48 +69,51 @@ static int ghash_init(struct shash_desc *desc)
|
||||
}
|
||||
|
||||
static void ghash_do_update(int blocks, u64 dg[], const char *src,
|
||||
struct ghash_key *key, const char *head,
|
||||
void (*simd_update)(int blocks, u64 dg[],
|
||||
const char *src,
|
||||
struct ghash_key const *k,
|
||||
const char *head))
|
||||
struct ghash_key *key, const char *head)
|
||||
{
|
||||
if (likely(crypto_simd_usable() && simd_update)) {
|
||||
be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
|
||||
|
||||
do {
|
||||
const u8 *in = src;
|
||||
|
||||
if (head) {
|
||||
in = head;
|
||||
blocks++;
|
||||
head = NULL;
|
||||
} else {
|
||||
src += GHASH_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
|
||||
gf128mul_lle(&dst, &key->k);
|
||||
} while (--blocks);
|
||||
|
||||
dg[0] = be64_to_cpu(dst.b);
|
||||
dg[1] = be64_to_cpu(dst.a);
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
void ghash_do_simd_update(int blocks, u64 dg[], const char *src,
|
||||
struct ghash_key *key, const char *head,
|
||||
void (*simd_update)(int blocks, u64 dg[],
|
||||
const char *src,
|
||||
u64 const h[][2],
|
||||
const char *head))
|
||||
{
|
||||
if (likely(crypto_simd_usable())) {
|
||||
kernel_neon_begin();
|
||||
simd_update(blocks, dg, src, key, head);
|
||||
simd_update(blocks, dg, src, key->h, head);
|
||||
kernel_neon_end();
|
||||
} else {
|
||||
be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
|
||||
|
||||
do {
|
||||
const u8 *in = src;
|
||||
|
||||
if (head) {
|
||||
in = head;
|
||||
blocks++;
|
||||
head = NULL;
|
||||
} else {
|
||||
src += GHASH_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
|
||||
gf128mul_lle(&dst, &key->k);
|
||||
} while (--blocks);
|
||||
|
||||
dg[0] = be64_to_cpu(dst.b);
|
||||
dg[1] = be64_to_cpu(dst.a);
|
||||
ghash_do_update(blocks, dg, src, key, head);
|
||||
}
|
||||
}
|
||||
|
||||
/* avoid hogging the CPU for too long */
|
||||
#define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE)
|
||||
|
||||
static int __ghash_update(struct shash_desc *desc, const u8 *src,
|
||||
unsigned int len,
|
||||
void (*simd_update)(int blocks, u64 dg[],
|
||||
const char *src,
|
||||
struct ghash_key const *k,
|
||||
const char *head))
|
||||
static int ghash_update(struct shash_desc *desc, const u8 *src,
|
||||
unsigned int len)
|
||||
{
|
||||
struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
|
||||
unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
|
||||
@ -143,9 +138,9 @@ static int __ghash_update(struct shash_desc *desc, const u8 *src,
|
||||
do {
|
||||
int chunk = min(blocks, MAX_BLOCKS);
|
||||
|
||||
ghash_do_update(chunk, ctx->digest, src, key,
|
||||
partial ? ctx->buf : NULL,
|
||||
simd_update);
|
||||
ghash_do_simd_update(chunk, ctx->digest, src, key,
|
||||
partial ? ctx->buf : NULL,
|
||||
pmull_ghash_update_p8);
|
||||
|
||||
blocks -= chunk;
|
||||
src += chunk * GHASH_BLOCK_SIZE;
|
||||
@ -157,19 +152,7 @@ static int __ghash_update(struct shash_desc *desc, const u8 *src,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ghash_update_p8(struct shash_desc *desc, const u8 *src,
|
||||
unsigned int len)
|
||||
{
|
||||
return __ghash_update(desc, src, len, pmull_ghash_update_p8);
|
||||
}
|
||||
|
||||
static int ghash_update_p64(struct shash_desc *desc, const u8 *src,
|
||||
unsigned int len)
|
||||
{
|
||||
return __ghash_update(desc, src, len, pmull_ghash_update_p64);
|
||||
}
|
||||
|
||||
static int ghash_final_p8(struct shash_desc *desc, u8 *dst)
|
||||
static int ghash_final(struct shash_desc *desc, u8 *dst)
|
||||
{
|
||||
struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
|
||||
unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
|
||||
@ -179,28 +162,8 @@ static int ghash_final_p8(struct shash_desc *desc, u8 *dst)
|
||||
|
||||
memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
|
||||
|
||||
ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
|
||||
pmull_ghash_update_p8);
|
||||
}
|
||||
put_unaligned_be64(ctx->digest[1], dst);
|
||||
put_unaligned_be64(ctx->digest[0], dst + 8);
|
||||
|
||||
*ctx = (struct ghash_desc_ctx){};
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ghash_final_p64(struct shash_desc *desc, u8 *dst)
|
||||
{
|
||||
struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
|
||||
unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
|
||||
|
||||
if (partial) {
|
||||
struct ghash_key *key = crypto_shash_ctx(desc->tfm);
|
||||
|
||||
memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
|
||||
|
||||
ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
|
||||
pmull_ghash_update_p64);
|
||||
ghash_do_simd_update(1, ctx->digest, ctx->buf, key, NULL,
|
||||
pmull_ghash_update_p8);
|
||||
}
|
||||
put_unaligned_be64(ctx->digest[1], dst);
|
||||
put_unaligned_be64(ctx->digest[0], dst + 8);
|
||||
@ -220,29 +183,6 @@ static void ghash_reflect(u64 h[], const be128 *k)
|
||||
h[1] ^= 0xc200000000000000UL;
|
||||
}
|
||||
|
||||
static int __ghash_setkey(struct ghash_key *key,
|
||||
const u8 *inkey, unsigned int keylen)
|
||||
{
|
||||
be128 h;
|
||||
|
||||
/* needed for the fallback */
|
||||
memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
|
||||
|
||||
ghash_reflect(key->h, &key->k);
|
||||
|
||||
h = key->k;
|
||||
gf128mul_lle(&h, &key->k);
|
||||
ghash_reflect(key->h2, &h);
|
||||
|
||||
gf128mul_lle(&h, &key->k);
|
||||
ghash_reflect(key->h3, &h);
|
||||
|
||||
gf128mul_lle(&h, &key->k);
|
||||
ghash_reflect(key->h4, &h);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ghash_setkey(struct crypto_shash *tfm,
|
||||
const u8 *inkey, unsigned int keylen)
|
||||
{
|
||||
@ -251,38 +191,28 @@ static int ghash_setkey(struct crypto_shash *tfm,
|
||||
if (keylen != GHASH_BLOCK_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
return __ghash_setkey(key, inkey, keylen);
|
||||
/* needed for the fallback */
|
||||
memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
|
||||
|
||||
ghash_reflect(key->h[0], &key->k);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg ghash_alg[] = {{
|
||||
static struct shash_alg ghash_alg = {
|
||||
.base.cra_name = "ghash",
|
||||
.base.cra_driver_name = "ghash-neon",
|
||||
.base.cra_priority = 150,
|
||||
.base.cra_blocksize = GHASH_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct ghash_key),
|
||||
.base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.digestsize = GHASH_DIGEST_SIZE,
|
||||
.init = ghash_init,
|
||||
.update = ghash_update_p8,
|
||||
.final = ghash_final_p8,
|
||||
.update = ghash_update,
|
||||
.final = ghash_final,
|
||||
.setkey = ghash_setkey,
|
||||
.descsize = sizeof(struct ghash_desc_ctx),
|
||||
}, {
|
||||
.base.cra_name = "ghash",
|
||||
.base.cra_driver_name = "ghash-ce",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = GHASH_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct ghash_key),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.digestsize = GHASH_DIGEST_SIZE,
|
||||
.init = ghash_init,
|
||||
.update = ghash_update_p64,
|
||||
.final = ghash_final_p64,
|
||||
.setkey = ghash_setkey,
|
||||
.descsize = sizeof(struct ghash_desc_ctx),
|
||||
}};
|
||||
};
|
||||
|
||||
static int num_rounds(struct crypto_aes_ctx *ctx)
|
||||
{
|
||||
@ -301,6 +231,7 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
|
||||
{
|
||||
struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
u8 key[GHASH_BLOCK_SIZE];
|
||||
be128 h;
|
||||
int ret;
|
||||
|
||||
ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
|
||||
@ -309,7 +240,22 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
|
||||
|
||||
aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});
|
||||
|
||||
return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
|
||||
/* needed for the fallback */
|
||||
memcpy(&ctx->ghash_key.k, key, GHASH_BLOCK_SIZE);
|
||||
|
||||
ghash_reflect(ctx->ghash_key.h[0], &ctx->ghash_key.k);
|
||||
|
||||
h = ctx->ghash_key.k;
|
||||
gf128mul_lle(&h, &ctx->ghash_key.k);
|
||||
ghash_reflect(ctx->ghash_key.h[1], &h);
|
||||
|
||||
gf128mul_lle(&h, &ctx->ghash_key.k);
|
||||
ghash_reflect(ctx->ghash_key.h[2], &h);
|
||||
|
||||
gf128mul_lle(&h, &ctx->ghash_key.k);
|
||||
ghash_reflect(ctx->ghash_key.h[3], &h);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
|
||||
@ -341,9 +287,9 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
|
||||
if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
|
||||
int blocks = count / GHASH_BLOCK_SIZE;
|
||||
|
||||
ghash_do_update(blocks, dg, src, &ctx->ghash_key,
|
||||
*buf_count ? buf : NULL,
|
||||
pmull_ghash_update_p64);
|
||||
ghash_do_simd_update(blocks, dg, src, &ctx->ghash_key,
|
||||
*buf_count ? buf : NULL,
|
||||
pmull_ghash_update_p64);
|
||||
|
||||
src += blocks * GHASH_BLOCK_SIZE;
|
||||
count %= GHASH_BLOCK_SIZE;
|
||||
@ -387,8 +333,8 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
|
||||
|
||||
if (buf_count) {
|
||||
memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
|
||||
ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL,
|
||||
pmull_ghash_update_p64);
|
||||
ghash_do_simd_update(1, dg, buf, &ctx->ghash_key, NULL,
|
||||
pmull_ghash_update_p64);
|
||||
}
|
||||
}
|
||||
|
||||
@ -433,8 +379,8 @@ static int gcm_encrypt(struct aead_request *req)
|
||||
}
|
||||
|
||||
kernel_neon_begin();
|
||||
pmull_gcm_encrypt(nbytes, dst, src, &ctx->ghash_key, dg,
|
||||
iv, ctx->aes_key.key_enc, nrounds,
|
||||
pmull_gcm_encrypt(nbytes, dst, src, ctx->ghash_key.h,
|
||||
dg, iv, ctx->aes_key.key_enc, nrounds,
|
||||
tag);
|
||||
kernel_neon_end();
|
||||
|
||||
@ -464,7 +410,7 @@ static int gcm_encrypt(struct aead_request *req)
|
||||
} while (--remaining > 0);
|
||||
|
||||
ghash_do_update(blocks, dg, walk.dst.virt.addr,
|
||||
&ctx->ghash_key, NULL, NULL);
|
||||
&ctx->ghash_key, NULL);
|
||||
|
||||
err = skcipher_walk_done(&walk,
|
||||
walk.nbytes % AES_BLOCK_SIZE);
|
||||
@ -483,7 +429,7 @@ static int gcm_encrypt(struct aead_request *req)
|
||||
|
||||
tag = (u8 *)&lengths;
|
||||
ghash_do_update(1, dg, tag, &ctx->ghash_key,
|
||||
walk.nbytes ? buf : NULL, NULL);
|
||||
walk.nbytes ? buf : NULL);
|
||||
|
||||
if (walk.nbytes)
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
@ -547,8 +493,8 @@ static int gcm_decrypt(struct aead_request *req)
|
||||
}
|
||||
|
||||
kernel_neon_begin();
|
||||
pmull_gcm_decrypt(nbytes, dst, src, &ctx->ghash_key, dg,
|
||||
iv, ctx->aes_key.key_enc, nrounds,
|
||||
pmull_gcm_decrypt(nbytes, dst, src, ctx->ghash_key.h,
|
||||
dg, iv, ctx->aes_key.key_enc, nrounds,
|
||||
tag);
|
||||
kernel_neon_end();
|
||||
|
||||
@ -568,7 +514,7 @@ static int gcm_decrypt(struct aead_request *req)
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
|
||||
ghash_do_update(blocks, dg, walk.src.virt.addr,
|
||||
&ctx->ghash_key, NULL, NULL);
|
||||
&ctx->ghash_key, NULL);
|
||||
|
||||
do {
|
||||
aes_encrypt(&ctx->aes_key, buf, iv);
|
||||
@ -591,7 +537,7 @@ static int gcm_decrypt(struct aead_request *req)
|
||||
|
||||
tag = (u8 *)&lengths;
|
||||
ghash_do_update(1, dg, tag, &ctx->ghash_key,
|
||||
walk.nbytes ? buf : NULL, NULL);
|
||||
walk.nbytes ? buf : NULL);
|
||||
|
||||
if (walk.nbytes) {
|
||||
aes_encrypt(&ctx->aes_key, buf, iv);
|
||||
@ -635,43 +581,28 @@ static struct aead_alg gcm_aes_alg = {
|
||||
.base.cra_driver_name = "gcm-aes-ce",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct gcm_aes_ctx),
|
||||
.base.cra_ctxsize = sizeof(struct gcm_aes_ctx) +
|
||||
4 * sizeof(u64[2]),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init ghash_ce_mod_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!cpu_have_named_feature(ASIMD))
|
||||
return -ENODEV;
|
||||
|
||||
if (cpu_have_named_feature(PMULL))
|
||||
ret = crypto_register_shashes(ghash_alg,
|
||||
ARRAY_SIZE(ghash_alg));
|
||||
else
|
||||
/* only register the first array element */
|
||||
ret = crypto_register_shash(ghash_alg);
|
||||
return crypto_register_aead(&gcm_aes_alg);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (cpu_have_named_feature(PMULL)) {
|
||||
ret = crypto_register_aead(&gcm_aes_alg);
|
||||
if (ret)
|
||||
crypto_unregister_shashes(ghash_alg,
|
||||
ARRAY_SIZE(ghash_alg));
|
||||
}
|
||||
return ret;
|
||||
return crypto_register_shash(&ghash_alg);
|
||||
}
|
||||
|
||||
static void __exit ghash_ce_mod_exit(void)
|
||||
{
|
||||
if (cpu_have_named_feature(PMULL))
|
||||
crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg));
|
||||
crypto_unregister_aead(&gcm_aes_alg);
|
||||
else
|
||||
crypto_unregister_shash(ghash_alg);
|
||||
crypto_unregister_aead(&gcm_aes_alg);
|
||||
crypto_unregister_shash(&ghash_alg);
|
||||
}
|
||||
|
||||
static const struct cpu_feature ghash_cpu_feature[] = {
|
||||
|
@@ -156,7 +156,7 @@ static int sha256_sparc64_import(struct shash_desc *desc, const void *in)
 	return 0;
 }

-static struct shash_alg sha256 = {
+static struct shash_alg sha256_alg = {
 	.digestsize	=	SHA256_DIGEST_SIZE,
 	.init		=	sha256_sparc64_init,
 	.update		=	sha256_sparc64_update,
@@ -174,7 +174,7 @@ static struct shash_alg sha256 = {
 	}
 };

-static struct shash_alg sha224 = {
+static struct shash_alg sha224_alg = {
 	.digestsize	=	SHA224_DIGEST_SIZE,
 	.init		=	sha224_sparc64_init,
 	.update		=	sha256_sparc64_update,
@@ -206,13 +206,13 @@ static bool __init sparc64_has_sha256_opcode(void)
 static int __init sha256_sparc64_mod_init(void)
 {
 	if (sparc64_has_sha256_opcode()) {
-		int ret = crypto_register_shash(&sha224);
+		int ret = crypto_register_shash(&sha224_alg);
 		if (ret < 0)
 			return ret;

-		ret = crypto_register_shash(&sha256);
+		ret = crypto_register_shash(&sha256_alg);
 		if (ret < 0) {
-			crypto_unregister_shash(&sha224);
+			crypto_unregister_shash(&sha224_alg);
 			return ret;
 		}

@@ -225,8 +225,8 @@ static int __init sha256_sparc64_mod_init(void)

 static void __exit sha256_sparc64_mod_fini(void)
 {
-	crypto_unregister_shash(&sha224);
-	crypto_unregister_shash(&sha256);
+	crypto_unregister_shash(&sha224_alg);
+	crypto_unregister_shash(&sha256_alg);
 }

 module_init(sha256_sparc64_mod_init);
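The rename from sha256/sha224 to sha256_alg/sha224_alg clears the way for the one-shot sha256() library helper mentioned in the merge summary, which would otherwise collide with the old variable name. A hedged sketch of using that helper, assuming a void sha256(const u8 *data, unsigned int len, u8 *out) signature exported from <crypto/sha.h> in this release:

    #include <crypto/sha.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    static void hash_blob(const u8 *data, unsigned int len)
    {
            u8 digest[SHA256_DIGEST_SIZE];

            /* whole-buffer digest in one call: no tfm, no shash descriptor */
            sha256(data, len, digest);

            print_hex_dump_bytes("sha256: ", DUMP_PREFIX_NONE,
                                 digest, sizeof(digest));
    }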
@ -63,7 +63,6 @@
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/inst.h>
|
||||
|
||||
#define VMOVDQ vmovdqu
|
||||
|
||||
@ -127,10 +126,6 @@ ddq_add_8:
|
||||
|
||||
/* generate a unique variable for ddq_add_x */
|
||||
|
||||
.macro setddq n
|
||||
var_ddq_add = ddq_add_\n
|
||||
.endm
|
||||
|
||||
/* generate a unique variable for xmm register */
|
||||
.macro setxdata n
|
||||
var_xdata = %xmm\n
|
||||
@ -140,9 +135,7 @@ ddq_add_8:
|
||||
|
||||
.macro club name, id
|
||||
.altmacro
|
||||
.if \name == DDQ_DATA
|
||||
setddq %\id
|
||||
.elseif \name == XDATA
|
||||
.if \name == XDATA
|
||||
setxdata %\id
|
||||
.endif
|
||||
.noaltmacro
|
||||
@ -165,9 +158,8 @@ ddq_add_8:
|
||||
|
||||
.set i, 1
|
||||
.rept (by - 1)
|
||||
club DDQ_DATA, i
|
||||
club XDATA, i
|
||||
vpaddq var_ddq_add(%rip), xcounter, var_xdata
|
||||
vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
|
||||
vptest ddq_low_msk(%rip), var_xdata
|
||||
jnz 1f
|
||||
vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata
|
||||
@ -180,8 +172,7 @@ ddq_add_8:
|
||||
vmovdqa 1*16(p_keys), xkeyA
|
||||
|
||||
vpxor xkey0, xdata0, xdata0
|
||||
club DDQ_DATA, by
|
||||
vpaddq var_ddq_add(%rip), xcounter, xcounter
|
||||
vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
|
||||
vptest ddq_low_msk(%rip), xcounter
|
||||
jnz 1f
|
||||
vpaddq ddq_high_add_1(%rip), xcounter, xcounter
|
||||
|
File diff suppressed because it is too large
@@ -120,7 +120,6 @@
 ##

 #include <linux/linkage.h>
-#include <asm/inst.h>

 # constants in mergeable sections, linker can reorder and merge
 .section .rodata.cst16.POLY, "aM", @progbits, 16
@@ -120,10 +120,10 @@ SYM_FUNC_START(chacha_block_xor_ssse3)
 	FRAME_BEGIN

 	# x0..3 = s0..3
-	movdqa		0x00(%rdi),%xmm0
-	movdqa		0x10(%rdi),%xmm1
-	movdqa		0x20(%rdi),%xmm2
-	movdqa		0x30(%rdi),%xmm3
+	movdqu		0x00(%rdi),%xmm0
+	movdqu		0x10(%rdi),%xmm1
+	movdqu		0x20(%rdi),%xmm2
+	movdqu		0x30(%rdi),%xmm3
 	movdqa		%xmm0,%xmm8
 	movdqa		%xmm1,%xmm9
 	movdqa		%xmm2,%xmm10
@@ -205,10 +205,10 @@ SYM_FUNC_START(hchacha_block_ssse3)
 	# %edx: nrounds
 	FRAME_BEGIN

-	movdqa		0x00(%rdi),%xmm0
-	movdqa		0x10(%rdi),%xmm1
-	movdqa		0x20(%rdi),%xmm2
-	movdqa		0x30(%rdi),%xmm3
+	movdqu		0x00(%rdi),%xmm0
+	movdqu		0x10(%rdi),%xmm1
+	movdqu		0x20(%rdi),%xmm2
+	movdqu		0x30(%rdi),%xmm3

 	mov		%edx,%r8d
 	call		chacha_permute
@@ -14,8 +14,6 @@
 #include <linux/module.h>
 #include <asm/simd.h>

-#define CHACHA_STATE_ALIGN 16
-
 asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
 				       unsigned int len, int nrounds);
 asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
@@ -124,8 +122,6 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,

 void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
 		hchacha_block_generic(state, stream, nrounds);
 	} else {
@@ -138,8 +134,6 @@ EXPORT_SYMBOL(hchacha_block_arch);

 void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	chacha_init_generic(state, key, iv);
 }
 EXPORT_SYMBOL(chacha_init_arch);
@@ -147,8 +141,6 @@ EXPORT_SYMBOL(chacha_init_arch);
 void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
 		       int nrounds)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
 	    bytes <= CHACHA_BLOCK_SIZE)
 		return chacha_crypt_generic(state, dst, src, bytes, nrounds);
@@ -170,15 +162,12 @@ EXPORT_SYMBOL(chacha_crypt_arch);
 static int chacha_simd_stream_xor(struct skcipher_request *req,
 				  const struct chacha_ctx *ctx, const u8 *iv)
 {
-	u32 *state, state_buf[16 + 2] __aligned(8);
+	u32 state[CHACHA_STATE_WORDS] __aligned(8);
 	struct skcipher_walk walk;
 	int err;

 	err = skcipher_walk_virt(&walk, req, false);

-	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
-
 	chacha_init_generic(state, ctx->key, iv);

 	while (walk.nbytes > 0) {
@@ -217,12 +206,10 @@ static int xchacha_simd(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-	u32 *state, state_buf[16 + 2] __aligned(8);
+	u32 state[CHACHA_STATE_WORDS] __aligned(8);
 	struct chacha_ctx subctx;
 	u8 real_iv[16];

-	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
 	chacha_init_generic(state, ctx->key, req->iv);

 	if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
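These glue changes only work because the SSSE3 assembly above now loads the state with movdqu: once unaligned loads are allowed, the 8-byte stack alignment the ABI guarantees is enough and the old over-allocate-then-PTR_ALIGN dance can go. A side-by-side sketch of the two idioms, with buffer sizes and function names illustrative only:

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* old idiom: over-allocate and round the pointer up to 16 bytes */
    static void old_style(void)
    {
            u32 state_buf[16 + 2] __aligned(8);     /* extra words to slide into */
            u32 *state = PTR_ALIGN(state_buf, 16);  /* now 16-byte aligned */

            state[0] = 0;                           /* ... use the aligned view ... */
    }

    /* new idiom: natural 8-byte alignment is enough once the asm uses movdqu */
    static void new_style(void)
    {
            u32 state[16] __aligned(8);

            state[0] = 0;                           /* ... */
    }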
@ -38,7 +38,6 @@
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/inst.h>
|
||||
|
||||
|
||||
.section .rodata
|
||||
@ -129,17 +128,17 @@ loop_64:/* 64 bytes Full cache line folding */
|
||||
#ifdef __x86_64__
|
||||
movdqa %xmm4, %xmm8
|
||||
#endif
|
||||
PCLMULQDQ 00, CONSTANT, %xmm1
|
||||
PCLMULQDQ 00, CONSTANT, %xmm2
|
||||
PCLMULQDQ 00, CONSTANT, %xmm3
|
||||
pclmulqdq $0x00, CONSTANT, %xmm1
|
||||
pclmulqdq $0x00, CONSTANT, %xmm2
|
||||
pclmulqdq $0x00, CONSTANT, %xmm3
|
||||
#ifdef __x86_64__
|
||||
PCLMULQDQ 00, CONSTANT, %xmm4
|
||||
pclmulqdq $0x00, CONSTANT, %xmm4
|
||||
#endif
|
||||
PCLMULQDQ 0x11, CONSTANT, %xmm5
|
||||
PCLMULQDQ 0x11, CONSTANT, %xmm6
|
||||
PCLMULQDQ 0x11, CONSTANT, %xmm7
|
||||
pclmulqdq $0x11, CONSTANT, %xmm5
|
||||
pclmulqdq $0x11, CONSTANT, %xmm6
|
||||
pclmulqdq $0x11, CONSTANT, %xmm7
|
||||
#ifdef __x86_64__
|
||||
PCLMULQDQ 0x11, CONSTANT, %xmm8
|
||||
pclmulqdq $0x11, CONSTANT, %xmm8
|
||||
#endif
|
||||
pxor %xmm5, %xmm1
|
||||
pxor %xmm6, %xmm2
|
||||
@ -149,8 +148,8 @@ loop_64:/* 64 bytes Full cache line folding */
|
||||
#else
|
||||
/* xmm8 unsupported for x32 */
|
||||
movdqa %xmm4, %xmm5
|
||||
PCLMULQDQ 00, CONSTANT, %xmm4
|
||||
PCLMULQDQ 0x11, CONSTANT, %xmm5
|
||||
pclmulqdq $0x00, CONSTANT, %xmm4
|
||||
pclmulqdq $0x11, CONSTANT, %xmm5
|
||||
pxor %xmm5, %xmm4
|
||||
#endif
|
||||
|
||||
@ -172,20 +171,20 @@ less_64:/* Folding cache line into 128bit */
|
||||
prefetchnta (BUF)
|
||||
|
||||
movdqa %xmm1, %xmm5
|
||||
PCLMULQDQ 0x00, CONSTANT, %xmm1
|
||||
PCLMULQDQ 0x11, CONSTANT, %xmm5
|
||||
pclmulqdq $0x00, CONSTANT, %xmm1
|
||||
pclmulqdq $0x11, CONSTANT, %xmm5
|
||||
pxor %xmm5, %xmm1
|
||||
pxor %xmm2, %xmm1
|
||||
|
||||
movdqa %xmm1, %xmm5
|
||||
PCLMULQDQ 0x00, CONSTANT, %xmm1
|
||||
PCLMULQDQ 0x11, CONSTANT, %xmm5
|
||||
pclmulqdq $0x00, CONSTANT, %xmm1
|
||||
pclmulqdq $0x11, CONSTANT, %xmm5
|
||||
pxor %xmm5, %xmm1
|
||||
pxor %xmm3, %xmm1
|
||||
|
||||
movdqa %xmm1, %xmm5
|
||||
PCLMULQDQ 0x00, CONSTANT, %xmm1
|
||||
PCLMULQDQ 0x11, CONSTANT, %xmm5
|
||||
pclmulqdq $0x00, CONSTANT, %xmm1
|
||||
pclmulqdq $0x11, CONSTANT, %xmm5
|
||||
pxor %xmm5, %xmm1
|
||||
pxor %xmm4, %xmm1
|
||||
|
||||
@ -193,8 +192,8 @@ less_64:/* Folding cache line into 128bit */
|
||||
jb fold_64
|
||||
loop_16:/* Folding rest buffer into 128bit */
|
||||
movdqa %xmm1, %xmm5
|
||||
PCLMULQDQ 0x00, CONSTANT, %xmm1
|
||||
PCLMULQDQ 0x11, CONSTANT, %xmm5
|
||||
pclmulqdq $0x00, CONSTANT, %xmm1
|
||||
pclmulqdq $0x11, CONSTANT, %xmm5
|
||||
pxor %xmm5, %xmm1
|
||||
pxor (BUF), %xmm1
|
||||
sub $0x10, LEN
|
||||
@ -205,7 +204,7 @@ loop_16:/* Folding rest buffer into 128bit */
|
||||
fold_64:
|
||||
/* perform the last 64 bit fold, also adds 32 zeroes
|
||||
* to the input stream */
|
||||
PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
|
||||
pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
|
||||
psrldq $0x08, %xmm1
|
||||
pxor CONSTANT, %xmm1
|
||||
|
||||
@ -220,7 +219,7 @@ fold_64:
|
||||
#endif
|
||||
psrldq $0x04, %xmm2
|
||||
pand %xmm3, %xmm1
|
||||
PCLMULQDQ 0x00, CONSTANT, %xmm1
|
||||
pclmulqdq $0x00, CONSTANT, %xmm1
|
||||
pxor %xmm2, %xmm1
|
||||
|
||||
/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
|
||||
@ -231,11 +230,11 @@ fold_64:
|
||||
#endif
|
||||
movdqa %xmm1, %xmm2
|
||||
pand %xmm3, %xmm1
|
||||
PCLMULQDQ 0x10, CONSTANT, %xmm1
|
||||
pclmulqdq $0x10, CONSTANT, %xmm1
|
||||
pand %xmm3, %xmm1
|
||||
PCLMULQDQ 0x00, CONSTANT, %xmm1
|
||||
pclmulqdq $0x00, CONSTANT, %xmm1
|
||||
pxor %xmm2, %xmm1
|
||||
PEXTRD 0x01, %xmm1, %eax
|
||||
pextrd $0x01, %xmm1, %eax
|
||||
|
||||
ret
|
||||
SYM_FUNC_END(crc32_pclmul_le_16)
|
||||
|
@ -43,7 +43,6 @@
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <asm/inst.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
@ -170,7 +169,7 @@ continue_block:
|
||||
|
||||
## branch into array
|
||||
lea jump_table(%rip), %bufp
|
||||
movzxw (%bufp, %rax, 2), len
|
||||
movzwq (%bufp, %rax, 2), len
|
||||
lea crc_array(%rip), %bufp
|
||||
lea (%bufp, len, 1), %bufp
|
||||
JMP_NOSPEC bufp
|
||||
@ -225,10 +224,10 @@ LABEL crc_ %i
|
||||
subq %rax, tmp # tmp -= rax*24
|
||||
|
||||
movq crc_init, %xmm1 # CRC for block 1
|
||||
PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2
|
||||
pclmulqdq $0x00, %xmm0, %xmm1 # Multiply by K2
|
||||
|
||||
movq crc1, %xmm2 # CRC for block 2
|
||||
PCLMULQDQ 0x10, %xmm0, %xmm2 # Multiply by K1
|
||||
pclmulqdq $0x10, %xmm0, %xmm2 # Multiply by K1
|
||||
|
||||
pxor %xmm2,%xmm1
|
||||
movq %xmm1, %rax
|
||||
|
@@ -948,10 +948,8 @@ static void store_felem(u64 *b, u64 *f)
 {
 	u64 f30 = f[3U];
 	u64 top_bit0 = f30 >> (u32)63U;
-	u64 carry0;
 	u64 f31;
 	u64 top_bit;
-	u64 carry;
 	u64 f0;
 	u64 f1;
 	u64 f2;
@@ -970,11 +968,11 @@ static void store_felem(u64 *b, u64 *f)
 	u64 o2;
 	u64 o3;
 	f[3U] = f30 & (u64)0x7fffffffffffffffU;
-	carry0 = add_scalar(f, f, (u64)19U * top_bit0);
+	add_scalar(f, f, (u64)19U * top_bit0);
 	f31 = f[3U];
 	top_bit = f31 >> (u32)63U;
 	f[3U] = f31 & (u64)0x7fffffffffffffffU;
-	carry = add_scalar(f, f, (u64)19U * top_bit);
+	add_scalar(f, f, (u64)19U * top_bit);
 	f0 = f[0U];
 	f1 = f[1U];
 	f2 = f[2U];
@ -14,7 +14,6 @@
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/inst.h>
|
||||
#include <asm/frame.h>
|
||||
|
||||
.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
|
||||
@ -51,9 +50,9 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble)
|
||||
pxor DATA, T2
|
||||
pxor SHASH, T3
|
||||
|
||||
PCLMULQDQ 0x00 SHASH DATA # DATA = a0 * b0
|
||||
PCLMULQDQ 0x11 SHASH T1 # T1 = a1 * b1
|
||||
PCLMULQDQ 0x00 T3 T2 # T2 = (a1 + a0) * (b1 + b0)
|
||||
pclmulqdq $0x00, SHASH, DATA # DATA = a0 * b0
|
||||
pclmulqdq $0x11, SHASH, T1 # T1 = a1 * b1
|
||||
pclmulqdq $0x00, T3, T2 # T2 = (a1 + a0) * (b1 + b0)
|
||||
pxor DATA, T2
|
||||
pxor T1, T2 # T2 = a0 * b1 + a1 * b0
|
||||
|
||||
@ -95,9 +94,9 @@ SYM_FUNC_START(clmul_ghash_mul)
|
||||
movups (%rdi), DATA
|
||||
movups (%rsi), SHASH
|
||||
movaps .Lbswap_mask, BSWAP
|
||||
PSHUFB_XMM BSWAP DATA
|
||||
pshufb BSWAP, DATA
|
||||
call __clmul_gf128mul_ble
|
||||
PSHUFB_XMM BSWAP DATA
|
||||
pshufb BSWAP, DATA
|
||||
movups DATA, (%rdi)
|
||||
FRAME_END
|
||||
ret
|
||||
@ -114,18 +113,18 @@ SYM_FUNC_START(clmul_ghash_update)
|
||||
movaps .Lbswap_mask, BSWAP
|
||||
movups (%rdi), DATA
|
||||
movups (%rcx), SHASH
|
||||
PSHUFB_XMM BSWAP DATA
|
||||
pshufb BSWAP, DATA
|
||||
.align 4
|
||||
.Lupdate_loop:
|
||||
movups (%rsi), IN1
|
||||
PSHUFB_XMM BSWAP IN1
|
||||
pshufb BSWAP, IN1
|
||||
pxor IN1, DATA
|
||||
call __clmul_gf128mul_ble
|
||||
sub $16, %rdx
|
||||
add $16, %rsi
|
||||
cmp $16, %rdx
|
||||
jge .Lupdate_loop
|
||||
PSHUFB_XMM BSWAP DATA
|
||||
pshufb BSWAP, DATA
|
||||
movups DATA, (%rdi)
|
||||
.Lupdate_just_ret:
|
||||
FRAME_END
|
||||
|
@ -12,7 +12,6 @@
|
||||
|
||||
#define REG_TYPE_R32 0
|
||||
#define REG_TYPE_R64 1
|
||||
#define REG_TYPE_XMM 2
|
||||
#define REG_TYPE_INVALID 100
|
||||
|
||||
.macro R32_NUM opd r32
|
||||
@ -123,77 +122,18 @@
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro XMM_NUM opd xmm
|
||||
\opd = REG_NUM_INVALID
|
||||
.ifc \xmm,%xmm0
|
||||
\opd = 0
|
||||
.endif
|
||||
.ifc \xmm,%xmm1
|
||||
\opd = 1
|
||||
.endif
|
||||
.ifc \xmm,%xmm2
|
||||
\opd = 2
|
||||
.endif
|
||||
.ifc \xmm,%xmm3
|
||||
\opd = 3
|
||||
.endif
|
||||
.ifc \xmm,%xmm4
|
||||
\opd = 4
|
||||
.endif
|
||||
.ifc \xmm,%xmm5
|
||||
\opd = 5
|
||||
.endif
|
||||
.ifc \xmm,%xmm6
|
||||
\opd = 6
|
||||
.endif
|
||||
.ifc \xmm,%xmm7
|
||||
\opd = 7
|
||||
.endif
|
||||
.ifc \xmm,%xmm8
|
||||
\opd = 8
|
||||
.endif
|
||||
.ifc \xmm,%xmm9
|
||||
\opd = 9
|
||||
.endif
|
||||
.ifc \xmm,%xmm10
|
||||
\opd = 10
|
||||
.endif
|
||||
.ifc \xmm,%xmm11
|
||||
\opd = 11
|
||||
.endif
|
||||
.ifc \xmm,%xmm12
|
||||
\opd = 12
|
||||
.endif
|
||||
.ifc \xmm,%xmm13
|
||||
\opd = 13
|
||||
.endif
|
||||
.ifc \xmm,%xmm14
|
||||
\opd = 14
|
||||
.endif
|
||||
.ifc \xmm,%xmm15
|
||||
\opd = 15
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro REG_TYPE type reg
|
||||
R32_NUM reg_type_r32 \reg
|
||||
R64_NUM reg_type_r64 \reg
|
||||
XMM_NUM reg_type_xmm \reg
|
||||
.if reg_type_r64 <> REG_NUM_INVALID
|
||||
\type = REG_TYPE_R64
|
||||
.elseif reg_type_r32 <> REG_NUM_INVALID
|
||||
\type = REG_TYPE_R32
|
||||
.elseif reg_type_xmm <> REG_NUM_INVALID
|
||||
\type = REG_TYPE_XMM
|
||||
.else
|
||||
\type = REG_TYPE_INVALID
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro PFX_OPD_SIZE
|
||||
.byte 0x66
|
||||
.endm
|
||||
|
||||
.macro PFX_REX opd1 opd2 W=0
|
||||
.if ((\opd1 | \opd2) & 8) || \W
|
||||
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
|
||||
@ -203,109 +143,6 @@
|
||||
.macro MODRM mod opd1 opd2
|
||||
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
|
||||
.endm
|
||||
|
||||
.macro PSHUFB_XMM xmm1 xmm2
|
||||
XMM_NUM pshufb_opd1 \xmm1
|
||||
XMM_NUM pshufb_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX pshufb_opd1 pshufb_opd2
|
||||
.byte 0x0f, 0x38, 0x00
|
||||
MODRM 0xc0 pshufb_opd1 pshufb_opd2
|
||||
.endm
|
||||
|
||||
.macro PCLMULQDQ imm8 xmm1 xmm2
|
||||
XMM_NUM clmul_opd1 \xmm1
|
||||
XMM_NUM clmul_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX clmul_opd1 clmul_opd2
|
||||
.byte 0x0f, 0x3a, 0x44
|
||||
MODRM 0xc0 clmul_opd1 clmul_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro PEXTRD imm8 xmm gpr
|
||||
R32_NUM extrd_opd1 \gpr
|
||||
XMM_NUM extrd_opd2 \xmm
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX extrd_opd1 extrd_opd2
|
||||
.byte 0x0f, 0x3a, 0x16
|
||||
MODRM 0xc0 extrd_opd1 extrd_opd2
|
||||
.byte \imm8
|
||||
.endm
|
||||
|
||||
.macro AESKEYGENASSIST rcon xmm1 xmm2
|
||||
XMM_NUM aeskeygen_opd1 \xmm1
|
||||
XMM_NUM aeskeygen_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX aeskeygen_opd1 aeskeygen_opd2
|
||||
.byte 0x0f, 0x3a, 0xdf
|
||||
MODRM 0xc0 aeskeygen_opd1 aeskeygen_opd2
|
||||
.byte \rcon
|
||||
.endm
|
||||
|
||||
.macro AESIMC xmm1 xmm2
|
||||
XMM_NUM aesimc_opd1 \xmm1
|
||||
XMM_NUM aesimc_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX aesimc_opd1 aesimc_opd2
|
||||
.byte 0x0f, 0x38, 0xdb
|
||||
MODRM 0xc0 aesimc_opd1 aesimc_opd2
|
||||
.endm
|
||||
|
||||
.macro AESENC xmm1 xmm2
|
||||
XMM_NUM aesenc_opd1 \xmm1
|
||||
XMM_NUM aesenc_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX aesenc_opd1 aesenc_opd2
|
||||
.byte 0x0f, 0x38, 0xdc
|
||||
MODRM 0xc0 aesenc_opd1 aesenc_opd2
|
||||
.endm
|
||||
|
||||
.macro AESENCLAST xmm1 xmm2
|
||||
XMM_NUM aesenclast_opd1 \xmm1
|
||||
XMM_NUM aesenclast_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX aesenclast_opd1 aesenclast_opd2
|
||||
.byte 0x0f, 0x38, 0xdd
|
||||
MODRM 0xc0 aesenclast_opd1 aesenclast_opd2
|
||||
.endm
|
||||
|
||||
.macro AESDEC xmm1 xmm2
|
||||
XMM_NUM aesdec_opd1 \xmm1
|
||||
XMM_NUM aesdec_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX aesdec_opd1 aesdec_opd2
|
||||
.byte 0x0f, 0x38, 0xde
|
||||
MODRM 0xc0 aesdec_opd1 aesdec_opd2
|
||||
.endm
|
||||
|
||||
.macro AESDECLAST xmm1 xmm2
|
||||
XMM_NUM aesdeclast_opd1 \xmm1
|
||||
XMM_NUM aesdeclast_opd2 \xmm2
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX aesdeclast_opd1 aesdeclast_opd2
|
||||
.byte 0x0f, 0x38, 0xdf
|
||||
MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
|
||||
.endm
|
||||
|
||||
.macro MOVQ_R64_XMM opd1 opd2
|
||||
REG_TYPE movq_r64_xmm_opd1_type \opd1
|
||||
.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
|
||||
XMM_NUM movq_r64_xmm_opd1 \opd1
|
||||
R64_NUM movq_r64_xmm_opd2 \opd2
|
||||
.else
|
||||
R64_NUM movq_r64_xmm_opd1 \opd1
|
||||
XMM_NUM movq_r64_xmm_opd2 \opd2
|
||||
.endif
|
||||
PFX_OPD_SIZE
|
||||
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
|
||||
.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
|
||||
.byte 0x0f, 0x7e
|
||||
.else
|
||||
.byte 0x0f, 0x6e
|
||||
.endif
|
||||
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
|
||||
.endm
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -548,7 +548,7 @@ config CRYPTO_XCBC
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
XCBC: Keyed-Hashing with encryption algorithm
|
||||
http://www.ietf.org/rfc/rfc3566.txt
|
||||
https://www.ietf.org/rfc/rfc3566.txt
|
||||
http://csrc.nist.gov/encryption/modes/proposedmodes/
|
||||
xcbc-mac/xcbc-mac-spec.pdf
|
||||
|
||||
@ -561,7 +561,7 @@ config CRYPTO_VMAC
|
||||
very high speed on 64-bit architectures.
|
||||
|
||||
See also:
|
||||
<http://fastcrypto.org/vmac>
|
||||
<https://fastcrypto.org/vmac>
|
||||
|
||||
comment "Digest"
|
||||
|
||||
@ -816,7 +816,7 @@ config CRYPTO_RMD128
|
||||
RIPEMD-160 should be used.
|
||||
|
||||
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
|
||||
See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
|
||||
See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
|
||||
|
||||
config CRYPTO_RMD160
|
||||
tristate "RIPEMD-160 digest algorithm"
|
||||
@ -833,7 +833,7 @@ config CRYPTO_RMD160
|
||||
against RIPEMD-160.
|
||||
|
||||
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
|
||||
See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
|
||||
See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
|
||||
|
||||
config CRYPTO_RMD256
|
||||
tristate "RIPEMD-256 digest algorithm"
|
||||
@ -845,7 +845,7 @@ config CRYPTO_RMD256
|
||||
(than RIPEMD-128).
|
||||
|
||||
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
|
||||
See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
|
||||
See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
|
||||
|
||||
config CRYPTO_RMD320
|
||||
tristate "RIPEMD-320 digest algorithm"
|
||||
@ -857,7 +857,7 @@ config CRYPTO_RMD320
|
||||
(than RIPEMD-160).
|
||||
|
||||
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
|
||||
See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
|
||||
See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
|
||||
|
||||
config CRYPTO_SHA1
|
||||
tristate "SHA1 digest algorithm"
|
||||
@ -1045,7 +1045,7 @@ config CRYPTO_TGR192
|
||||
Tiger was developed by Ross Anderson and Eli Biham.
|
||||
|
||||
See also:
|
||||
<http://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
|
||||
<https://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
|
||||
|
||||
config CRYPTO_WP512
|
||||
tristate "Whirlpool digest algorithms"
|
||||
@ -1221,7 +1221,7 @@ config CRYPTO_BLOWFISH
|
||||
designed for use on "large microprocessors".
|
||||
|
||||
See also:
|
||||
<http://www.schneier.com/blowfish.html>
|
||||
<https://www.schneier.com/blowfish.html>
|
||||
|
||||
config CRYPTO_BLOWFISH_COMMON
|
||||
tristate
|
||||
@ -1230,7 +1230,7 @@ config CRYPTO_BLOWFISH_COMMON
|
||||
generic c and the assembler implementations.
|
||||
	  See also:
	  <http://www.schneier.com/blowfish.html>
	  <https://www.schneier.com/blowfish.html>

config CRYPTO_BLOWFISH_X86_64
	tristate "Blowfish cipher algorithm (x86_64)"
@ -1245,7 +1245,7 @@ config CRYPTO_BLOWFISH_X86_64
	  designed for use on "large microprocessors".

	  See also:
	  <http://www.schneier.com/blowfish.html>
	  <https://www.schneier.com/blowfish.html>

config CRYPTO_CAMELLIA
	tristate "Camellia cipher algorithms"
@ -1441,10 +1441,10 @@ config CRYPTO_SALSA20
	  Salsa20 stream cipher algorithm.

	  Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
	  Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
	  Stream Cipher Project. See <https://www.ecrypt.eu.org/stream/>

	  The Salsa20 stream cipher algorithm is designed by Daniel J.
	  Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
	  Bernstein <djb@cr.yp.to>. See <https://cr.yp.to/snuffle.html>

config CRYPTO_CHACHA20
	tristate "ChaCha stream cipher algorithms"
@ -1456,7 +1456,7 @@ config CRYPTO_CHACHA20
	  ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
	  Bernstein and further specified in RFC7539 for use in IETF protocols.
	  This is the portable C implementation of ChaCha20. See also:
	  <http://cr.yp.to/chacha/chacha-20080128.pdf>
	  <https://cr.yp.to/chacha/chacha-20080128.pdf>

	  XChaCha20 is the application of the XSalsa20 construction to ChaCha20
	  rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length
@ -1509,7 +1509,7 @@ config CRYPTO_SERPENT
	  variant of Serpent for compatibility with old kerneli.org code.

	  See also:
	  <http://www.cl.cam.ac.uk/~rja14/serpent.html>
	  <https://www.cl.cam.ac.uk/~rja14/serpent.html>

config CRYPTO_SERPENT_SSE2_X86_64
	tristate "Serpent cipher algorithm (x86_64/SSE2)"
@ -1528,7 +1528,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
	  blocks parallel using SSE2 instruction set.

	  See also:
	  <http://www.cl.cam.ac.uk/~rja14/serpent.html>
	  <https://www.cl.cam.ac.uk/~rja14/serpent.html>

config CRYPTO_SERPENT_SSE2_586
	tristate "Serpent cipher algorithm (i586/SSE2)"
@ -1547,7 +1547,7 @@ config CRYPTO_SERPENT_SSE2_586
	  blocks parallel using SSE2 instruction set.

	  See also:
	  <http://www.cl.cam.ac.uk/~rja14/serpent.html>
	  <https://www.cl.cam.ac.uk/~rja14/serpent.html>

config CRYPTO_SERPENT_AVX_X86_64
	tristate "Serpent cipher algorithm (x86_64/AVX)"
@ -1567,7 +1567,7 @@ config CRYPTO_SERPENT_AVX_X86_64
	  eight blocks parallel using the AVX instruction set.

	  See also:
	  <http://www.cl.cam.ac.uk/~rja14/serpent.html>
	  <https://www.cl.cam.ac.uk/~rja14/serpent.html>

config CRYPTO_SERPENT_AVX2_X86_64
	tristate "Serpent cipher algorithm (x86_64/AVX2)"
@ -1583,7 +1583,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
	  blocks parallel using AVX2 instruction set.

	  See also:
	  <http://www.cl.cam.ac.uk/~rja14/serpent.html>
	  <https://www.cl.cam.ac.uk/~rja14/serpent.html>

config CRYPTO_SM4
	tristate "SM4 cipher algorithm"
@ -1640,7 +1640,7 @@ config CRYPTO_TWOFISH
	  bits.

	  See also:
	  <http://www.schneier.com/twofish.html>
	  <https://www.schneier.com/twofish.html>

config CRYPTO_TWOFISH_COMMON
	tristate
@ -1662,7 +1662,7 @@ config CRYPTO_TWOFISH_586
	  bits.

	  See also:
	  <http://www.schneier.com/twofish.html>
	  <https://www.schneier.com/twofish.html>

config CRYPTO_TWOFISH_X86_64
	tristate "Twofish cipher algorithm (x86_64)"
@ -1678,7 +1678,7 @@ config CRYPTO_TWOFISH_X86_64
	  bits.

	  See also:
	  <http://www.schneier.com/twofish.html>
	  <https://www.schneier.com/twofish.html>

config CRYPTO_TWOFISH_X86_64_3WAY
	tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
@ -1699,7 +1699,7 @@ config CRYPTO_TWOFISH_X86_64_3WAY
	  blocks parallel, utilizing resources of out-of-order CPUs better.

	  See also:
	  <http://www.schneier.com/twofish.html>
	  <https://www.schneier.com/twofish.html>

config CRYPTO_TWOFISH_AVX_X86_64
	tristate "Twofish cipher algorithm (x86_64/AVX)"
@ -1722,7 +1722,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
	  eight blocks parallel using the AVX Instruction Set.

	  See also:
	  <http://www.schneier.com/twofish.html>
	  <https://www.schneier.com/twofish.html>

comment "Compression"
@ -109,6 +109,14 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);

struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node)
{
	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
				node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);

struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
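The hunk above adds the NUMA-aware acomp allocator that the hisilicon/zip changes in this pull rely on. Below is a minimal sketch of how a caller might use it; the wrapper function and the "lz4" algorithm name are illustrative only, not part of this series::

    #include <linux/err.h>
    #include <linux/numa.h>
    #include <crypto/acompress.h>

    /* Illustrative wrapper; the "lz4" algorithm name is only an example. */
    static struct crypto_acomp *example_acomp_on_node(int node)
    {
            /*
             * Same lookup as crypto_alloc_acomp(), but the transform is
             * allocated with kzalloc_node() on 'node' and tfm->node is
             * recorded, so a NUMA-aware driver can keep its resources local.
             * NUMA_NO_NODE means "no preference".
             */
            if (node < 0)
                    node = NUMA_NO_NODE;

            return crypto_alloc_acomp_node("lz4", 0, 0, node);
    }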
@ -490,7 +490,6 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
|
||||
|
||||
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
const char *nhpoly1305_name;
|
||||
struct skcipher_instance *inst;
|
||||
@ -500,14 +499,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
struct shash_alg *hash_alg;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -565,8 +559,6 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags &
|
||||
CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
|
||||
inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
|
||||
|
@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
|
||||
|
||||
if (!ctx->used)
|
||||
ctx->merge = 0;
|
||||
ctx->init = ctx->more;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
|
||||
|
||||
@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
|
||||
*
|
||||
* @sk socket of connection to user space
|
||||
* @flags If MSG_DONTWAIT is set, then only report if function would sleep
|
||||
* @min Set to minimum request size if partial requests are allowed.
|
||||
* @return 0 when writable memory is available, < 0 upon error
|
||||
*/
|
||||
int af_alg_wait_for_data(struct sock *sk, unsigned flags)
|
||||
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
|
||||
{
|
||||
DEFINE_WAIT_FUNC(wait, woken_wake_function);
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags)
|
||||
if (signal_pending(current))
|
||||
break;
|
||||
timeout = MAX_SCHEDULE_TIMEOUT;
|
||||
if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
|
||||
if (sk_wait_event(sk, &timeout,
|
||||
ctx->init && (!ctx->more ||
|
||||
(min && ctx->used >= min)),
|
||||
&wait)) {
|
||||
err = 0;
|
||||
break;
|
||||
@ -843,10 +847,11 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
|
||||
}
|
||||
|
||||
lock_sock(sk);
|
||||
if (!ctx->more && ctx->used) {
|
||||
if (ctx->init && (init || !ctx->more)) {
|
||||
err = -EINVAL;
|
||||
goto unlock;
|
||||
}
|
||||
ctx->init = true;
|
||||
|
||||
if (init) {
|
||||
ctx->enc = enc;
|
||||
|
@ -690,6 +690,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
|
||||
spawn->mask = mask;
|
||||
spawn->next = inst->spawns;
|
||||
inst->spawns = spawn;
|
||||
inst->alg.cra_flags |=
|
||||
(alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
|
||||
err = 0;
|
||||
}
|
||||
up_write(&crypto_alg_sem);
|
||||
@ -816,7 +818,23 @@ struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb)
}
EXPORT_SYMBOL_GPL(crypto_get_attr_type);

int crypto_check_attr_type(struct rtattr **tb, u32 type)
/**
 * crypto_check_attr_type() - check algorithm type and compute inherited mask
 * @tb: the template parameters
 * @type: the algorithm type the template would be instantiated as
 * @mask_ret: (output) the mask that should be passed to crypto_grab_*()
 *	      to restrict the flags of any inner algorithms
 *
 * Validate that the algorithm type the user requested is compatible with the
 * one the template would actually be instantiated as. E.g., if the user is
 * doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because
 * the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm.
 *
 * Also compute the mask to use to restrict the flags of any inner algorithms.
 *
 * Return: 0 on success; -errno on failure
 */
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret)
{
	struct crypto_attr_type *algt;

@ -827,6 +845,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type)
	if ((algt->type ^ type) & algt->mask)
		return -EINVAL;

	*mask_ret = crypto_algt_inherited_mask(algt);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_check_attr_type);
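Most of the template conversions in this series follow directly from this helper: the old crypto_get_attr_type() / crypto_requires_sync() boilerplate collapses into one call that also hands back the mask to pass to crypto_grab_*(). A sketch of the resulting ->create() prologue follows; the names are illustrative, and the real conversions are visible in the adiantum hunk above and in authenc, ccm, gcm and others below::

    #include <linux/slab.h>
    #include <crypto/algapi.h>
    #include <crypto/internal/skcipher.h>

    /* Sketch of the converted ->create() prologue; names are illustrative. */
    static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
    {
            struct crypto_skcipher_spawn *spawn;
            struct skcipher_instance *inst;
            u32 mask;
            int err;

            /* Type check plus inherited-flags mask in a single call. */
            err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
            if (err)
                    return err;

            inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
            if (!inst)
                    return -ENOMEM;
            spawn = skcipher_instance_ctx(inst);

            /*
             * The returned mask restricts the inner algorithm, so flags such
             * as CRYPTO_ALG_ASYNC or CRYPTO_ALG_ALLOCATES_MEMORY are
             * inherited correctly by the instance.
             */
            err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
                                       crypto_attr_alg_name(tb[1]), 0, mask);
            if (err)
                    goto err_free_inst;

            /* ... fill in inst->alg and register the instance ... */
            return 0;

    err_free_inst:
            crypto_drop_skcipher(spawn);
            kfree(inst);
            return err;
    }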
|
||||
|
@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
|
||||
size_t usedpages = 0; /* [in] RX bufs to be used from user */
|
||||
size_t processed = 0; /* [in] TX bufs to be consumed */
|
||||
|
||||
if (!ctx->used) {
|
||||
err = af_alg_wait_for_data(sk, flags);
|
||||
if (!ctx->init || ctx->more) {
|
||||
err = af_alg_wait_for_data(sk, flags, 0);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
|
||||
int err = 0;
|
||||
size_t len = 0;
|
||||
|
||||
if (!ctx->used) {
|
||||
err = af_alg_wait_for_data(sk, flags);
|
||||
if (!ctx->init || (ctx->more && ctx->used < bs)) {
|
||||
err = af_alg_wait_for_data(sk, flags, bs);
|
||||
if (err)
|
||||
return err;
|
||||
}
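The two recvmsg() hunks above change when AF_ALG blocks: an skcipher read now waits until at least one full cipher block has been queued instead of returning early on a partial write. The user-space view of that behaviour is sketched below, assuming AF_ALG and AES are available in the running kernel; SOL_ALG is defined manually in case the libc headers lack it, and error handling is omitted to keep the sketch short::

    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <unistd.h>
    #include <linux/if_alg.h>

    #ifndef SOL_ALG
    #define SOL_ALG 279
    #endif

    int example_af_alg_block_read(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "skcipher",
                    .salg_name   = "ecb(aes)",
            };
            unsigned char key[16] = { 0 };
            unsigned char pt[16] = "sixteen byte msg";
            unsigned char ct[16];
            char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
            struct msghdr msg = { 0 };
            struct cmsghdr *cmsg;
            struct iovec iov;
            int tfmfd, opfd;

            tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
            bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
            setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
            opfd = accept(tfmfd, NULL, 0);

            /* First sendmsg() carries the operation type; MSG_MORE marks the
             * request as not yet complete. */
            msg.msg_control = cbuf;
            msg.msg_controllen = sizeof(cbuf);
            cmsg = CMSG_FIRSTHDR(&msg);
            cmsg->cmsg_level = SOL_ALG;
            cmsg->cmsg_type = ALG_SET_OP;
            cmsg->cmsg_len = CMSG_LEN(sizeof(int));
            *(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
            iov.iov_base = pt;
            iov.iov_len = 8;
            msg.msg_iov = &iov;
            msg.msg_iovlen = 1;
            sendmsg(opfd, &msg, MSG_MORE);

            /* Second half of the block; with this change a blocking read()
             * waits until at least one full cipher block (16 bytes for AES)
             * has been queued instead of returning early on partial data. */
            send(opfd, pt + 8, 8, 0);
            read(opfd, ct, sizeof(ct));

            close(opfd);
            close(tfmfd);
            return 0;
    }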
|
||||
|
crypto/api.c (24 changed lines)
@ -433,8 +433,9 @@ err:
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_alloc_base);
|
||||
|
||||
void *crypto_create_tfm(struct crypto_alg *alg,
|
||||
const struct crypto_type *frontend)
|
||||
void *crypto_create_tfm_node(struct crypto_alg *alg,
|
||||
const struct crypto_type *frontend,
|
||||
int node)
|
||||
{
|
||||
char *mem;
|
||||
struct crypto_tfm *tfm = NULL;
|
||||
@ -445,12 +446,13 @@ void *crypto_create_tfm(struct crypto_alg *alg,
|
||||
tfmsize = frontend->tfmsize;
|
||||
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
|
||||
|
||||
mem = kzalloc(total, GFP_KERNEL);
|
||||
mem = kzalloc_node(total, GFP_KERNEL, node);
|
||||
if (mem == NULL)
|
||||
goto out_err;
|
||||
|
||||
tfm = (struct crypto_tfm *)(mem + tfmsize);
|
||||
tfm->__crt_alg = alg;
|
||||
tfm->node = node;
|
||||
|
||||
err = frontend->init_tfm(tfm);
|
||||
if (err)
|
||||
@ -472,7 +474,7 @@ out_err:
|
||||
out:
|
||||
return mem;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_create_tfm);
|
||||
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);
|
||||
|
||||
struct crypto_alg *crypto_find_alg(const char *alg_name,
|
||||
const struct crypto_type *frontend,
|
||||
@ -490,11 +492,13 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
|
||||
EXPORT_SYMBOL_GPL(crypto_find_alg);
|
||||
|
||||
/*
|
||||
* crypto_alloc_tfm - Locate algorithm and allocate transform
|
||||
* crypto_alloc_tfm_node - Locate algorithm and allocate transform
|
||||
* @alg_name: Name of algorithm
|
||||
* @frontend: Frontend algorithm type
|
||||
* @type: Type of algorithm
|
||||
* @mask: Mask for type comparison
|
||||
* @node: NUMA node in which users desire to put requests, if node is
|
||||
* NUMA_NO_NODE, it means users have no special requirement.
|
||||
*
|
||||
* crypto_alloc_tfm() will first attempt to locate an already loaded
|
||||
* algorithm. If that fails and the kernel supports dynamically loadable
|
||||
@ -509,8 +513,10 @@ EXPORT_SYMBOL_GPL(crypto_find_alg);
|
||||
*
|
||||
* In case of error the return value is an error pointer.
|
||||
*/
|
||||
void *crypto_alloc_tfm(const char *alg_name,
|
||||
const struct crypto_type *frontend, u32 type, u32 mask)
|
||||
|
||||
void *crypto_alloc_tfm_node(const char *alg_name,
|
||||
const struct crypto_type *frontend, u32 type, u32 mask,
|
||||
int node)
|
||||
{
|
||||
void *tfm;
|
||||
int err;
|
||||
@ -524,7 +530,7 @@ void *crypto_alloc_tfm(const char *alg_name,
|
||||
goto err;
|
||||
}
|
||||
|
||||
tfm = crypto_create_tfm(alg, frontend);
|
||||
tfm = crypto_create_tfm_node(alg, frontend, node);
|
||||
if (!IS_ERR(tfm))
|
||||
return tfm;
|
||||
|
||||
@ -542,7 +548,7 @@ err:
|
||||
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
|
||||
EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node);
|
||||
|
||||
/*
|
||||
* crypto_destroy_tfm - Free crypto transform
|
||||
|
@ -372,7 +372,6 @@ static void crypto_authenc_free(struct aead_instance *inst)
|
||||
static int crypto_authenc_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct aead_instance *inst;
|
||||
struct authenc_instance_ctx *ctx;
|
||||
@ -381,14 +380,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
|
||||
struct skcipher_alg *enc;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -423,8 +417,6 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
|
||||
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = (auth_base->cra_flags |
|
||||
enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
|
||||
auth_base->cra_priority;
|
||||
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
|
||||
|
@ -390,7 +390,6 @@ static void crypto_authenc_esn_free(struct aead_instance *inst)
|
||||
static int crypto_authenc_esn_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct aead_instance *inst;
|
||||
struct authenc_esn_instance_ctx *ctx;
|
||||
@ -399,14 +398,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
|
||||
struct skcipher_alg *enc;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -437,8 +431,6 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
|
||||
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = (auth_base->cra_flags |
|
||||
enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
|
||||
auth_base->cra_priority;
|
||||
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
* - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
|
||||
* - OpenSSL license : https://www.openssl.org/source/license.html
|
||||
* - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
|
||||
* - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* More information about the BLAKE2 hash function can be found at
|
||||
* https://blake2.net.
|
||||
|
@ -6,7 +6,7 @@
|
||||
|
||||
/*
|
||||
* Algorithm Specification
|
||||
* http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
|
||||
* https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
|
||||
*/
|
||||
|
||||
/*
|
||||
|
crypto/ccm.c (33 changed lines)
@ -447,7 +447,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
|
||||
const char *ctr_name,
|
||||
const char *mac_name)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct aead_instance *inst;
|
||||
struct ccm_instance_ctx *ictx;
|
||||
@ -455,14 +454,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
|
||||
struct hash_alg_common *mac;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -470,7 +464,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
|
||||
ictx = aead_instance_ctx(inst);
|
||||
|
||||
err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst),
|
||||
mac_name, 0, CRYPTO_ALG_ASYNC);
|
||||
mac_name, 0, mask | CRYPTO_ALG_ASYNC);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
mac = crypto_spawn_ahash_alg(&ictx->mac);
|
||||
@ -507,7 +501,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
|
||||
mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = (mac->base.cra_priority +
|
||||
ctr->base.cra_priority) / 2;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
@ -712,21 +705,15 @@ static void crypto_rfc4309_free(struct aead_instance *inst)
|
||||
static int crypto_rfc4309_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct aead_instance *inst;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct aead_alg *alg;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -759,7 +746,6 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
|
||||
@ -878,9 +864,10 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
struct shash_instance *inst;
|
||||
struct crypto_cipher_spawn *spawn;
|
||||
struct crypto_alg *alg;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -890,7 +877,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
spawn = shash_instance_ctx(inst);
|
||||
|
||||
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
|
||||
crypto_attr_alg_name(tb[1]), 0, 0);
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
alg = crypto_spawn_cipher_alg(spawn);
|
||||
|
@ -555,7 +555,6 @@ static void chachapoly_free(struct aead_instance *inst)
|
||||
static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
const char *name, unsigned int ivsize)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct aead_instance *inst;
|
||||
struct chachapoly_instance_ctx *ctx;
|
||||
@ -566,14 +565,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
if (ivsize > CHACHAPOLY_IV_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -613,8 +607,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = (chacha->base.cra_flags |
|
||||
poly->base.cra_flags) & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = (chacha->base.cra_priority +
|
||||
poly->base.cra_priority) / 2;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
|
@ -225,9 +225,10 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
struct crypto_cipher_spawn *spawn;
|
||||
struct crypto_alg *alg;
|
||||
unsigned long alignmask;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -237,7 +238,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
spawn = shash_instance_ctx(inst);
|
||||
|
||||
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
|
||||
crypto_attr_alg_name(tb[1]), 0, 0);
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
alg = crypto_spawn_cipher_alg(spawn);
|
||||
|
@ -191,17 +191,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
|
||||
return ictx->queue;
|
||||
}
|
||||
|
||||
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
|
||||
u32 *mask)
|
||||
static void cryptd_type_and_mask(struct crypto_attr_type *algt,
|
||||
u32 *type, u32 *mask)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
/*
|
||||
* cryptd is allowed to wrap internal algorithms, but in that case the
|
||||
* resulting cryptd instance will be marked as internal as well.
|
||||
*/
|
||||
*type = algt->type & CRYPTO_ALG_INTERNAL;
|
||||
*mask = algt->mask & CRYPTO_ALG_INTERNAL;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return;
|
||||
/* No point in cryptd wrapping an algorithm that's already async. */
|
||||
*mask |= CRYPTO_ALG_ASYNC;
|
||||
|
||||
*type |= algt->type & CRYPTO_ALG_INTERNAL;
|
||||
*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
|
||||
*mask |= crypto_algt_inherited_mask(algt);
|
||||
}
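cryptd_type_and_mask() now receives the already-validated attribute type and folds in the inherited-flags mask, so a cryptd wrapper stays internal-only when it wraps an internal algorithm. For reference, the snippet below shows how such a wrapper is typically requested; it is loosely modelled on the x86 CLMUL GHASH glue, and the driver name is only an example::

    #include <crypto/cryptd.h>

    /*
     * Passing CRYPTO_ALG_INTERNAL in both type and mask is what makes
     * cryptd_type_and_mask() mark the resulting "cryptd(...)" instance as
     * internal too, so it never becomes visible as a standalone hash.
     */
    static struct cryptd_ahash *example_wrap_internal_hash(void)
    {
            return cryptd_alloc_ahash("__ghash-pclmulqdqni",
                                      CRYPTO_ALG_INTERNAL,
                                      CRYPTO_ALG_INTERNAL);
    }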
|
||||
|
||||
static int cryptd_init_instance(struct crypto_instance *inst,
|
||||
@ -364,6 +367,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst)
|
||||
|
||||
static int cryptd_create_skcipher(struct crypto_template *tmpl,
|
||||
struct rtattr **tb,
|
||||
struct crypto_attr_type *algt,
|
||||
struct cryptd_queue *queue)
|
||||
{
|
||||
struct skcipherd_instance_ctx *ctx;
|
||||
@ -373,10 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
type = 0;
|
||||
mask = CRYPTO_ALG_ASYNC;
|
||||
|
||||
cryptd_check_internal(tb, &type, &mask);
|
||||
cryptd_type_and_mask(algt, &type, &mask);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -395,9 +396,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
|
||||
|
||||
inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
|
||||
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
|
||||
inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
|
||||
inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
|
||||
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
|
||||
@ -633,16 +633,17 @@ static void cryptd_hash_free(struct ahash_instance *inst)
|
||||
}
|
||||
|
||||
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
struct crypto_attr_type *algt,
|
||||
struct cryptd_queue *queue)
|
||||
{
|
||||
struct hashd_instance_ctx *ctx;
|
||||
struct ahash_instance *inst;
|
||||
struct shash_alg *alg;
|
||||
u32 type = 0;
|
||||
u32 mask = 0;
|
||||
u32 type;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
cryptd_check_internal(tb, &type, &mask);
|
||||
cryptd_type_and_mask(algt, &type, &mask);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -661,10 +662,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
|
||||
inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
|
||||
(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
|
||||
CRYPTO_ALG_OPTIONAL_KEY));
|
||||
|
||||
inst->alg.halg.digestsize = alg->digestsize;
|
||||
inst->alg.halg.statesize = alg->statesize;
|
||||
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
|
||||
@ -820,16 +820,17 @@ static void cryptd_aead_free(struct aead_instance *inst)
|
||||
|
||||
static int cryptd_create_aead(struct crypto_template *tmpl,
|
||||
struct rtattr **tb,
|
||||
struct crypto_attr_type *algt,
|
||||
struct cryptd_queue *queue)
|
||||
{
|
||||
struct aead_instance_ctx *ctx;
|
||||
struct aead_instance *inst;
|
||||
struct aead_alg *alg;
|
||||
u32 type = 0;
|
||||
u32 mask = CRYPTO_ALG_ASYNC;
|
||||
u32 type;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
cryptd_check_internal(tb, &type, &mask);
|
||||
cryptd_type_and_mask(algt, &type, &mask);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -848,8 +849,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
|
||||
inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
|
||||
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
|
||||
|
||||
inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
|
||||
@ -884,11 +885,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
||||
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
|
||||
case CRYPTO_ALG_TYPE_SKCIPHER:
|
||||
return cryptd_create_skcipher(tmpl, tb, &queue);
|
||||
return cryptd_create_skcipher(tmpl, tb, algt, &queue);
|
||||
case CRYPTO_ALG_TYPE_HASH:
|
||||
return cryptd_create_hash(tmpl, tb, &queue);
|
||||
return cryptd_create_hash(tmpl, tb, algt, &queue);
|
||||
case CRYPTO_ALG_TYPE_AEAD:
|
||||
return cryptd_create_aead(tmpl, tb, &queue);
|
||||
return cryptd_create_aead(tmpl, tb, algt, &queue);
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
|
crypto/ctr.c (17 changed lines)
@ -256,29 +256,20 @@ static void crypto_rfc3686_free(struct skcipher_instance *inst)
|
||||
static int crypto_rfc3686_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct skcipher_instance *inst;
|
||||
struct skcipher_alg *alg;
|
||||
struct crypto_skcipher_spawn *spawn;
|
||||
u32 mask;
|
||||
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
|
||||
return -EINVAL;
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask) |
|
||||
crypto_requires_off(algt->type, algt->mask,
|
||||
CRYPTO_ALG_NEED_FALLBACK);
|
||||
|
||||
spawn = skcipher_instance_ctx(inst);
|
||||
|
||||
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
|
||||
@ -310,8 +301,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
|
||||
inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
|
||||
inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
|
||||
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
|
||||
|
crypto/cts.c (13 changed lines)
@ -325,19 +325,13 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
struct crypto_skcipher_spawn *spawn;
|
||||
struct skcipher_instance *inst;
|
||||
struct crypto_attr_type *algt;
|
||||
struct skcipher_alg *alg;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -364,7 +358,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
|
||||
|
crypto/dh.c (38 changed lines)
@ -9,6 +9,7 @@
|
||||
#include <crypto/internal/kpp.h>
|
||||
#include <crypto/kpp.h>
|
||||
#include <crypto/dh.h>
|
||||
#include <linux/fips.h>
|
||||
#include <linux/mpi.h>
|
||||
|
||||
struct dh_ctx {
|
||||
@ -179,6 +180,43 @@ static int dh_compute_value(struct kpp_request *req)
|
||||
if (ret)
|
||||
goto err_free_base;
|
||||
|
||||
if (fips_enabled) {
|
||||
/* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */
|
||||
if (req->src) {
|
||||
MPI pone;
|
||||
|
||||
/* z <= 1 */
|
||||
if (mpi_cmp_ui(val, 1) < 1) {
|
||||
ret = -EBADMSG;
|
||||
goto err_free_base;
|
||||
}
|
||||
|
||||
/* z == p - 1 */
|
||||
pone = mpi_alloc(0);
|
||||
|
||||
if (!pone) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_base;
|
||||
}
|
||||
|
||||
ret = mpi_sub_ui(pone, ctx->p, 1);
|
||||
if (!ret && !mpi_cmp(pone, val))
|
||||
ret = -EBADMSG;
|
||||
|
||||
mpi_free(pone);
|
||||
|
||||
if (ret)
|
||||
goto err_free_base;
|
||||
|
||||
/* SP800-56A rev 3 5.6.2.1.3 key check */
|
||||
} else {
|
||||
if (dh_is_pubkey_valid(ctx, val)) {
|
||||
ret = -EAGAIN;
|
||||
goto err_free_val;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign);
|
||||
if (ret)
|
||||
goto err_free_base;
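The new FIPS branch above implements the SP800-56A rev 3 section 5.7.1.1 check on the shared secret z with mpi_cmp_ui() and the mpi_sub_ui() helper added by this series. The condition itself is simply 1 < z < p - 1; a toy fixed-width illustration (plain integers instead of MPIs, for readability only)::

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Plain-integer illustration of the check performed above with
     * mpi_cmp_ui()/mpi_sub_ui(): the shared secret z is only acceptable
     * when 1 < z < p - 1.
     */
    static bool dh_shared_secret_in_range(uint64_t z, uint64_t p)
    {
            return z > 1 && z < p - 1;
    }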
|
||||
|
crypto/ecc.c (44 changed lines)
@ -940,7 +940,7 @@ static bool ecc_point_is_zero(const struct ecc_point *point)
|
||||
}
|
||||
|
||||
/* Point multiplication algorithm using Montgomery's ladder with co-Z
|
||||
* coordinates. From http://eprint.iacr.org/2011/338.pdf
|
||||
* coordinates. From https://eprint.iacr.org/2011/338.pdf
|
||||
*/
|
||||
|
||||
/* Double in place */
|
||||
@ -1404,7 +1404,9 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
|
||||
}
|
||||
|
||||
ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
|
||||
if (ecc_point_is_zero(pk)) {
|
||||
|
||||
/* SP800-56A rev 3 5.6.2.1.3 key check */
|
||||
if (ecc_is_pubkey_valid_full(curve, pk)) {
|
||||
ret = -EAGAIN;
|
||||
goto err_free_point;
|
||||
}
|
||||
@ -1452,6 +1454,33 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
|
||||
}
|
||||
EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
|
||||
|
||||
/* SP800-56A section 5.6.2.3.3 full verification */
|
||||
int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
|
||||
struct ecc_point *pk)
|
||||
{
|
||||
struct ecc_point *nQ;
|
||||
|
||||
/* Checks 1 through 3 */
|
||||
int ret = ecc_is_pubkey_valid_partial(curve, pk);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Check 4: Verify that nQ is the zero point. */
|
||||
nQ = ecc_alloc_point(pk->ndigits);
|
||||
if (!nQ)
|
||||
return -ENOMEM;
|
||||
|
||||
ecc_point_mult(nQ, pk, curve->n, NULL, curve, pk->ndigits);
|
||||
if (!ecc_point_is_zero(nQ))
|
||||
ret = -EINVAL;
|
||||
|
||||
ecc_free_point(nQ);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ecc_is_pubkey_valid_full);
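ecc_make_pub_key() above now rejects a freshly generated key pair with -EAGAIN when the full SP800-56A check does not pass, signalling the caller to draw new randomness and try again. One plausible retry loop is sketched below; it is illustrative only (real callers live in crypto/ecdh.c), it assumes ecc_gen_privkey() from crypto/ecc.h as the source of fresh private keys, and the bound of ten attempts is arbitrary::

    static int example_gen_keypair(unsigned int curve_id, unsigned int ndigits,
                                   u64 *priv, u64 *pub)
    {
            int tries, ret;

            for (tries = 0; tries < 10; tries++) {
                    /* Draw a new private key, then derive and check the
                     * public key; -EAGAIN means "try new randomness". */
                    ret = ecc_gen_privkey(curve_id, ndigits, priv);
                    if (ret)
                            return ret;

                    ret = ecc_make_pub_key(curve_id, ndigits, priv, pub);
                    if (ret != -EAGAIN)
                            return ret;  /* 0 on success; other errors are fatal */
            }

            return -EAGAIN;
    }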
|
||||
|
||||
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
|
||||
const u64 *private_key, const u64 *public_key,
|
||||
u64 *secret)
|
||||
@ -1495,11 +1524,16 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
|
||||
|
||||
ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
|
||||
|
||||
if (ecc_point_is_zero(product)) {
|
||||
ret = -EFAULT;
|
||||
goto err_validity;
|
||||
}
|
||||
|
||||
ecc_swap_digits(product->x, secret, ndigits);
|
||||
|
||||
if (ecc_point_is_zero(product))
|
||||
ret = -EFAULT;
|
||||
|
||||
err_validity:
|
||||
memzero_explicit(priv, sizeof(priv));
|
||||
memzero_explicit(rand_z, sizeof(rand_z));
|
||||
ecc_free_point(product);
|
||||
err_alloc_product:
|
||||
ecc_free_point(pk);
|
||||
|
crypto/ecc.h (14 changed lines)
@ -147,6 +147,20 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
|
||||
int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
|
||||
struct ecc_point *pk);
|
||||
|
||||
/**
|
||||
* ecc_is_pubkey_valid_full() - Full public key validation
|
||||
*
|
||||
* @curve: elliptic curve domain parameters
|
||||
* @pk: public key as a point
|
||||
*
|
||||
* Validate public key according to SP800-56A section 5.6.2.3.3 ECC Full
|
||||
* Public-Key Validation Routine.
|
||||
*
|
||||
* Return: 0 if validation is successful, -EINVAL if validation fails.
|
||||
*/
|
||||
int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
|
||||
struct ecc_point *pk);
|
||||
|
||||
/**
|
||||
* vli_is_zero() - Determine if vli is zero
|
||||
*
|
||||
|
@ -115,7 +115,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
|
||||
struct aead_instance *inst;
|
||||
int err;
|
||||
|
||||
inst = aead_geniv_alloc(tmpl, tb, 0, 0);
|
||||
inst = aead_geniv_alloc(tmpl, tb);
|
||||
|
||||
if (IS_ERR(inst))
|
||||
return PTR_ERR(inst);
|
||||
|
@ -466,7 +466,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
return PTR_ERR(shash_name);
|
||||
|
||||
type = algt->type & algt->mask;
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
mask = crypto_algt_inherited_mask(algt);
|
||||
|
||||
switch (type) {
|
||||
case CRYPTO_ALG_TYPE_SKCIPHER:
|
||||
@ -525,7 +525,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
/* Synchronous hash, e.g., "sha256" */
|
||||
_hash_alg = crypto_alg_mod_lookup(shash_name,
|
||||
CRYPTO_ALG_TYPE_SHASH,
|
||||
CRYPTO_ALG_TYPE_MASK);
|
||||
CRYPTO_ALG_TYPE_MASK | mask);
|
||||
if (IS_ERR(_hash_alg)) {
|
||||
err = PTR_ERR(_hash_alg);
|
||||
goto out_drop_skcipher;
|
||||
@ -557,7 +557,12 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto out_free_hash;
|
||||
|
||||
base->cra_flags = block_base->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
/*
|
||||
* hash_alg wasn't gotten via crypto_grab*(), so we need to inherit its
|
||||
* flags manually.
|
||||
*/
|
||||
base->cra_flags |= (hash_alg->base.cra_flags &
|
||||
CRYPTO_ALG_INHERITED_FLAGS);
|
||||
base->cra_blocksize = block_base->cra_blocksize;
|
||||
base->cra_ctxsize = sizeof(struct essiv_tfm_ctx);
|
||||
base->cra_alignmask = block_base->cra_alignmask;
|
||||
|
40
crypto/gcm.c
40
crypto/gcm.c
@ -578,7 +578,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
|
||||
const char *ctr_name,
|
||||
const char *ghash_name)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct aead_instance *inst;
|
||||
struct gcm_instance_ctx *ctx;
|
||||
@ -586,14 +585,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
|
||||
struct hash_alg_common *ghash;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -635,8 +629,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = (ghash->base.cra_flags |
|
||||
ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = (ghash->base.cra_priority +
|
||||
ctr->base.cra_priority) / 2;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
@ -835,21 +827,15 @@ static void crypto_rfc4106_free(struct aead_instance *inst)
|
||||
static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct aead_instance *inst;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct aead_alg *alg;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -882,7 +868,6 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
|
||||
@ -1057,21 +1042,15 @@ static void crypto_rfc4543_free(struct aead_instance *inst)
|
||||
static int crypto_rfc4543_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct aead_instance *inst;
|
||||
struct aead_alg *alg;
|
||||
struct crypto_rfc4543_instance_ctx *ctx;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -1104,7 +1083,6 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
|
||||
|
@ -39,22 +39,19 @@ static void aead_geniv_free(struct aead_instance *inst)
|
||||
}
|
||||
|
||||
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
struct rtattr **tb, u32 type, u32 mask)
|
||||
struct rtattr **tb)
|
||||
{
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct crypto_attr_type *algt;
|
||||
struct aead_instance *inst;
|
||||
struct aead_alg *alg;
|
||||
unsigned int ivsize;
|
||||
unsigned int maxauthsize;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_CAST(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
|
||||
if (err)
|
||||
return ERR_PTR(err);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -62,11 +59,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
|
||||
spawn = aead_instance_ctx(inst);
|
||||
|
||||
/* Ignore async algorithms if necessary. */
|
||||
mask |= crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
|
||||
crypto_attr_alg_name(tb[1]), type, mask);
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
@ -89,7 +83,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
|
||||
|
@ -168,11 +168,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
struct crypto_shash_spawn *spawn;
|
||||
struct crypto_alg *alg;
|
||||
struct shash_alg *salg;
|
||||
u32 mask;
|
||||
int err;
|
||||
int ds;
|
||||
int ss;
|
||||
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -182,7 +183,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
spawn = shash_instance_ctx(inst);
|
||||
|
||||
err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
|
||||
crypto_attr_alg_name(tb[1]), 0, 0);
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
salg = crypto_spawn_shash_alg(spawn);
|
||||
|
@ -68,13 +68,28 @@ void crypto_remove_final(struct list_head *list);
|
||||
void crypto_shoot_alg(struct crypto_alg *alg);
|
||||
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
|
||||
u32 mask);
|
||||
void *crypto_create_tfm(struct crypto_alg *alg,
|
||||
const struct crypto_type *frontend);
|
||||
void *crypto_create_tfm_node(struct crypto_alg *alg,
|
||||
const struct crypto_type *frontend, int node);
|
||||
|
||||
static inline void *crypto_create_tfm(struct crypto_alg *alg,
|
||||
const struct crypto_type *frontend)
|
||||
{
|
||||
return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE);
|
||||
}
|
||||
|
||||
struct crypto_alg *crypto_find_alg(const char *alg_name,
|
||||
const struct crypto_type *frontend,
|
||||
u32 type, u32 mask);
|
||||
void *crypto_alloc_tfm(const char *alg_name,
|
||||
const struct crypto_type *frontend, u32 type, u32 mask);
|
||||
|
||||
void *crypto_alloc_tfm_node(const char *alg_name,
|
||||
const struct crypto_type *frontend, u32 type, u32 mask,
|
||||
int node);
|
||||
|
||||
static inline void *crypto_alloc_tfm(const char *alg_name,
|
||||
const struct crypto_type *frontend, u32 type, u32 mask)
|
||||
{
|
||||
return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE);
|
||||
}
|
||||
|
||||
int crypto_probing_notify(unsigned long val, void *v);
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
* Design
|
||||
* ======
|
||||
*
|
||||
* See http://www.chronox.de/jent.html
|
||||
* See https://www.chronox.de/jent.html
|
||||
*
|
||||
* License
|
||||
* =======
|
||||
@ -47,7 +47,7 @@
|
||||
|
||||
/*
|
||||
* This Jitterentropy RNG is based on the jitterentropy library
|
||||
* version 2.2.0 provided at http://www.chronox.de/jent.html
|
||||
* version 2.2.0 provided at https://www.chronox.de/jent.html
|
||||
*/
|
||||
|
||||
#ifdef __OPTIMIZE__
|
||||
|
crypto/lrw.c (134 changed lines)
@ -9,7 +9,7 @@
|
||||
*/
|
||||
/* This implementation is checked against the test vectors in the above
|
||||
* document and by a test vector provided by Ken Buchanan at
|
||||
* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
|
||||
* https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
|
||||
*
|
||||
* The test vectors are included in the testing module tcrypt.[ch] */
|
||||
|
||||
@ -27,7 +27,7 @@
|
||||
|
||||
#define LRW_BLOCK_SIZE 16
|
||||
|
||||
struct priv {
|
||||
struct lrw_tfm_ctx {
|
||||
struct crypto_skcipher *child;
|
||||
|
||||
/*
|
||||
@ -49,12 +49,12 @@ struct priv {
|
||||
be128 mulinc[128];
|
||||
};
|
||||
|
||||
struct rctx {
|
||||
struct lrw_request_ctx {
|
||||
be128 t;
|
||||
struct skcipher_request subreq;
|
||||
};
|
||||
|
||||
static inline void setbit128_bbe(void *b, int bit)
|
||||
static inline void lrw_setbit128_bbe(void *b, int bit)
|
||||
{
|
||||
__set_bit(bit ^ (0x80 -
|
||||
#ifdef __BIG_ENDIAN
|
||||
@ -65,10 +65,10 @@ static inline void setbit128_bbe(void *b, int bit)
|
||||
), b);
|
||||
}
|
||||
|
||||
static int setkey(struct crypto_skcipher *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct priv *ctx = crypto_skcipher_ctx(parent);
|
||||
struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
|
||||
struct crypto_skcipher *child = ctx->child;
|
||||
int err, bsize = LRW_BLOCK_SIZE;
|
||||
const u8 *tweak = key + keylen - bsize;
|
||||
@ -92,7 +92,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
|
||||
|
||||
/* initialize optimization table */
|
||||
for (i = 0; i < 128; i++) {
|
||||
setbit128_bbe(&tmp, i);
|
||||
lrw_setbit128_bbe(&tmp, i);
|
||||
ctx->mulinc[i] = tmp;
|
||||
gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
|
||||
}
|
||||
@ -108,10 +108,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
|
||||
* For example:
|
||||
*
|
||||
* u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
|
||||
* int i = next_index(&counter);
|
||||
* int i = lrw_next_index(&counter);
|
||||
* // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
|
||||
*/
|
||||
static int next_index(u32 *counter)
|
||||
static int lrw_next_index(u32 *counter)
|
||||
{
|
||||
int i, res = 0;
|
||||
|
||||
@ -135,14 +135,14 @@ static int next_index(u32 *counter)
|
||||
* We compute the tweak masks twice (both before and after the ECB encryption or
|
||||
* decryption) to avoid having to allocate a temporary buffer and/or make
|
||||
* mutliple calls to the 'ecb(..)' instance, which usually would be slower than
|
||||
* just doing the next_index() calls again.
|
||||
* just doing the lrw_next_index() calls again.
|
||||
*/
|
||||
static int xor_tweak(struct skcipher_request *req, bool second_pass)
|
||||
static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
|
||||
{
|
||||
const int bs = LRW_BLOCK_SIZE;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct priv *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
be128 t = rctx->t;
|
||||
struct skcipher_walk w;
|
||||
__be32 *iv;
|
||||
@ -178,7 +178,8 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
|
||||
|
||||
/* T <- I*Key2, using the optimization
|
||||
* discussed in the specification */
|
||||
be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
|
||||
be128_xor(&t, &t,
|
||||
&ctx->mulinc[lrw_next_index(counter)]);
|
||||
} while ((avail -= bs) >= bs);
|
||||
|
||||
if (second_pass && w.nbytes == w.total) {
|
||||
@ -194,38 +195,40 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int xor_tweak_pre(struct skcipher_request *req)
|
||||
static int lrw_xor_tweak_pre(struct skcipher_request *req)
|
||||
{
|
||||
return xor_tweak(req, false);
|
||||
return lrw_xor_tweak(req, false);
|
||||
}
|
||||
|
||||
static int xor_tweak_post(struct skcipher_request *req)
|
||||
static int lrw_xor_tweak_post(struct skcipher_request *req)
|
||||
{
|
||||
return xor_tweak(req, true);
|
||||
return lrw_xor_tweak(req, true);
|
||||
}
|
||||
|
||||
static void crypt_done(struct crypto_async_request *areq, int err)
|
||||
static void lrw_crypt_done(struct crypto_async_request *areq, int err)
|
||||
{
|
||||
struct skcipher_request *req = areq->data;
|
||||
|
||||
if (!err) {
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
|
||||
rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = xor_tweak_post(req);
|
||||
err = lrw_xor_tweak_post(req);
|
||||
}
|
||||
|
||||
skcipher_request_complete(req, err);
|
||||
}
|
||||
|
||||
static void init_crypt(struct skcipher_request *req)
|
||||
static void lrw_init_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
const struct lrw_tfm_ctx *ctx =
|
||||
crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
|
||||
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct skcipher_request *subreq = &rctx->subreq;
|
||||
|
||||
skcipher_request_set_tfm(subreq, ctx->child);
|
||||
skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
|
||||
skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
|
||||
req);
|
||||
/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
|
||||
skcipher_request_set_crypt(subreq, req->dst, req->dst,
|
||||
req->cryptlen, req->iv);
|
||||
@ -237,33 +240,33 @@ static void init_crypt(struct skcipher_request *req)
|
||||
gf128mul_64k_bbe(&rctx->t, ctx->table);
|
||||
}
|
||||
|
||||
static int encrypt(struct skcipher_request *req)
|
||||
static int lrw_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct skcipher_request *subreq = &rctx->subreq;
|
||||
|
||||
init_crypt(req);
|
||||
return xor_tweak_pre(req) ?:
|
||||
lrw_init_crypt(req);
|
||||
return lrw_xor_tweak_pre(req) ?:
|
||||
crypto_skcipher_encrypt(subreq) ?:
|
||||
xor_tweak_post(req);
|
||||
lrw_xor_tweak_post(req);
|
||||
}
|
||||
|
||||
static int decrypt(struct skcipher_request *req)
|
||||
static int lrw_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct skcipher_request *subreq = &rctx->subreq;
|
||||
|
||||
init_crypt(req);
|
||||
return xor_tweak_pre(req) ?:
|
||||
lrw_init_crypt(req);
|
||||
return lrw_xor_tweak_pre(req) ?:
|
||||
crypto_skcipher_decrypt(subreq) ?:
|
||||
xor_tweak_post(req);
|
||||
lrw_xor_tweak_post(req);
|
||||
}
|
||||
|
||||
static int init_tfm(struct crypto_skcipher *tfm)
|
||||
static int lrw_init_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
|
||||
struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
|
||||
struct priv *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct crypto_skcipher *cipher;
|
||||
|
||||
cipher = crypto_spawn_skcipher(spawn);
|
||||
@ -273,45 +276,39 @@ static int init_tfm(struct crypto_skcipher *tfm)
|
||||
ctx->child = cipher;
|
||||
|
||||
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
|
||||
sizeof(struct rctx));
|
||||
sizeof(struct lrw_request_ctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void exit_tfm(struct crypto_skcipher *tfm)
|
||||
static void lrw_exit_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct priv *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
if (ctx->table)
|
||||
gf128mul_free_64k(ctx->table);
|
||||
crypto_free_skcipher(ctx->child);
|
||||
}
|
||||
|
||||
static void crypto_lrw_free(struct skcipher_instance *inst)
|
||||
static void lrw_free_instance(struct skcipher_instance *inst)
|
||||
{
|
||||
crypto_drop_skcipher(skcipher_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
struct crypto_skcipher_spawn *spawn;
|
||||
struct skcipher_instance *inst;
|
||||
struct crypto_attr_type *algt;
|
||||
struct skcipher_alg *alg;
|
||||
const char *cipher_name;
|
||||
char ecb_name[CRYPTO_MAX_ALG_NAME];
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(cipher_name))
|
||||
@ -379,7 +376,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
} else
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
|
||||
@ -391,43 +387,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
|
||||
LRW_BLOCK_SIZE;
|
||||
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct priv);
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);
|
||||
|
||||
inst->alg.init = init_tfm;
|
||||
inst->alg.exit = exit_tfm;
|
||||
inst->alg.init = lrw_init_tfm;
|
||||
inst->alg.exit = lrw_exit_tfm;
|
||||
|
||||
inst->alg.setkey = setkey;
|
||||
inst->alg.encrypt = encrypt;
|
||||
inst->alg.decrypt = decrypt;
|
||||
inst->alg.setkey = lrw_setkey;
|
||||
inst->alg.encrypt = lrw_encrypt;
|
||||
inst->alg.decrypt = lrw_decrypt;
|
||||
|
||||
inst->free = crypto_lrw_free;
|
||||
inst->free = lrw_free_instance;
|
||||
|
||||
err = skcipher_register_instance(tmpl, inst);
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
crypto_lrw_free(inst);
|
||||
lrw_free_instance(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_tmpl = {
|
||||
static struct crypto_template lrw_tmpl = {
|
||||
.name = "lrw",
|
||||
.create = create,
|
||||
.create = lrw_create,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init crypto_module_init(void)
|
||||
static int __init lrw_module_init(void)
|
||||
{
|
||||
return crypto_register_template(&crypto_tmpl);
|
||||
return crypto_register_template(&lrw_tmpl);
|
||||
}
|
||||
|
||||
static void __exit crypto_module_exit(void)
|
||||
static void __exit lrw_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&crypto_tmpl);
|
||||
crypto_unregister_template(&lrw_tmpl);
|
||||
}
|
||||
|
||||
subsys_initcall(crypto_module_init);
|
||||
module_exit(crypto_module_exit);
|
||||
subsys_initcall(lrw_module_init);
|
||||
module_exit(lrw_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("LRW block cipher mode");
|
||||
|
@@ -226,18 +226,14 @@ static int pcrypt_init_instance(struct crypto_instance *inst,
}

static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
u32 type, u32 mask)
struct crypto_attr_type *algt)
{
struct pcrypt_instance_ctx *ctx;
struct crypto_attr_type *algt;
struct aead_instance *inst;
struct aead_alg *alg;
u32 mask = crypto_algt_inherited_mask(algt);
int err;

algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);

inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
@@ -254,7 +250,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
goto err_free_inst;

err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, 0);
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;

@@ -263,7 +259,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto err_free_inst;

inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC;

inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
@@ -298,7 +294,7 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)

switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
return pcrypt_create_aead(tmpl, tb, algt);
}

return -EINVAL;
@@ -320,7 +316,7 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
{
int ret = -ENOMEM;

*pinst = padata_alloc_possible(name);
*pinst = padata_alloc(name);
if (!*pinst)
return ret;

@@ -331,12 +327,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
return ret;
}

static void pcrypt_fini_padata(struct padata_instance *pinst)
{
padata_stop(pinst);
padata_free(pinst);
}

static struct crypto_template pcrypt_tmpl = {
.name = "pcrypt",
.create = pcrypt_create,
@@ -359,13 +349,10 @@ static int __init pcrypt_init(void)
if (err)
goto err_deinit_pencrypt;

padata_start(pencrypt);
padata_start(pdecrypt);

return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
pcrypt_fini_padata(pencrypt);
padata_free(pencrypt);
err_unreg_kset:
kset_unregister(pcrypt_kset);
err:
@@ -376,8 +363,8 @@ static void __exit pcrypt_exit(void)
{
crypto_unregister_template(&pcrypt_tmpl);

pcrypt_fini_padata(pencrypt);
pcrypt_fini_padata(pdecrypt);
padata_free(pencrypt);
padata_free(pdecrypt);

kset_unregister(pcrypt_kset);
}

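The pcrypt changes above track the simplified padata API: the instance is now obtained with padata_alloc() and torn down with padata_free() alone, with no separate start/stop step. A minimal sketch of that lifecycle, using a hypothetical module name and with error paths trimmed:

/* Sketch only; "example" names are not part of the patch above. */
#include <linux/module.h>
#include <linux/padata.h>

static struct padata_instance *example_pinst;

static int __init example_init(void)
{
	example_pinst = padata_alloc("example");
	return example_pinst ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	/* No padata_stop() call is needed before freeing any more. */
	padata_free(example_pinst);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
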
@ -596,7 +596,6 @@ static void pkcs1pad_free(struct akcipher_instance *inst)
|
||||
|
||||
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct akcipher_instance *inst;
|
||||
struct pkcs1pad_inst_ctx *ctx;
|
||||
@ -604,14 +603,9 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
const char *hash_name;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
@ -658,7 +652,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
goto err_free_inst;
|
||||
}
|
||||
|
||||
inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
|
||||
|
||||
|
@@ -9,8 +9,8 @@
* Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream
* Cipher Project. It is designed by Daniel J. Bernstein <djb@cr.yp.to>.
* More information about eSTREAM and Salsa20 can be found here:
* http://www.ecrypt.eu.org/stream/
* http://cr.yp.to/snuffle.html
* https://www.ecrypt.eu.org/stream/
* https://cr.yp.to/snuffle.html
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free

@@ -138,7 +138,7 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
struct aead_instance *inst;
int err;

inst = aead_geniv_alloc(tmpl, tb, 0, 0);
inst = aead_geniv_alloc(tmpl, tb);

if (IS_ERR(inst))
return PTR_ERR(inst);
@@ -164,23 +164,9 @@ free_inst:
return err;
}

static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;

algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);

if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
return -EINVAL;

return seqiv_aead_create(tmpl, tb);
}

static struct crypto_template seqiv_tmpl = {
.name = "seqiv",
.create = seqiv_create,
.create = seqiv_aead_create,
.module = THIS_MODULE,
};

@@ -3,7 +3,7 @@
* Cryptographic API.
*
* SHA-3, as specified in
* http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
* https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
*
* SHA-3 code by Jeff Garzik <jeff@garzik.org>
* Ard Biesheuvel <ard.biesheuvel@linaro.org>

@ -171,7 +171,8 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
|
||||
drvname) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto out_free_salg;
|
||||
|
||||
alg->base.cra_flags = CRYPTO_ALG_ASYNC;
|
||||
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
(ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
|
||||
alg->base.cra_priority = ialg->base.cra_priority;
|
||||
alg->base.cra_blocksize = ialg->base.cra_blocksize;
|
||||
alg->base.cra_alignmask = ialg->base.cra_alignmask;
|
||||
@ -417,7 +418,8 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname,
|
||||
drvname) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto out_free_salg;
|
||||
|
||||
alg->base.cra_flags = CRYPTO_ALG_ASYNC;
|
||||
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
(ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
|
||||
alg->base.cra_priority = ialg->base.cra_priority;
|
||||
alg->base.cra_blocksize = ialg->base.cra_blocksize;
|
||||
alg->base.cra_alignmask = ialg->base.cra_alignmask;
|
||||
|
@ -934,22 +934,15 @@ static void skcipher_free_instance_simple(struct skcipher_instance *inst)
|
||||
struct skcipher_instance *skcipher_alloc_instance_simple(
|
||||
struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct skcipher_instance *inst;
|
||||
struct crypto_cipher_spawn *spawn;
|
||||
struct crypto_alg *cipher_alg;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_CAST(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
mask = crypto_requires_off(algt->type, algt->mask,
|
||||
CRYPTO_ALG_NEED_FALLBACK);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
|
||||
if (err)
|
||||
return ERR_PTR(err);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
|
@@ -3916,7 +3916,7 @@ static const struct hash_testvec hmac_sm3_tv_template[] = {
};

/*
* SHA1 test vectors from from FIPS PUB 180-1
* SHA1 test vectors from FIPS PUB 180-1
* Long vector from CAVS 5.0
*/
static const struct hash_testvec sha1_tv_template[] = {
@@ -4103,7 +4103,7 @@ static const struct hash_testvec sha1_tv_template[] = {


/*
* SHA224 test vectors from from FIPS PUB 180-2
* SHA224 test vectors from FIPS PUB 180-2
*/
static const struct hash_testvec sha224_tv_template[] = {
{
@@ -4273,7 +4273,7 @@ static const struct hash_testvec sha224_tv_template[] = {
};

/*
* SHA256 test vectors from from NIST
* SHA256 test vectors from NIST
*/
static const struct hash_testvec sha256_tv_template[] = {
{
@@ -4442,7 +4442,7 @@ static const struct hash_testvec sha256_tv_template[] = {
};

/*
* SHA384 test vectors from from NIST and kerneli
* SHA384 test vectors from NIST and kerneli
*/
static const struct hash_testvec sha384_tv_template[] = {
{
@@ -4632,7 +4632,7 @@ static const struct hash_testvec sha384_tv_template[] = {
};

/*
* SHA512 test vectors from from NIST and kerneli
* SHA512 test vectors from NIST and kerneli
*/
static const struct hash_testvec sha512_tv_template[] = {
{

@ -620,9 +620,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
struct shash_instance *inst;
|
||||
struct crypto_cipher_spawn *spawn;
|
||||
struct crypto_alg *alg;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -632,7 +633,7 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
spawn = shash_instance_ctx(inst);
|
||||
|
||||
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
|
||||
crypto_attr_alg_name(tb[1]), 0, 0);
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
alg = crypto_spawn_cipher_alg(spawn);
|
||||
|
@ -191,9 +191,10 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
struct crypto_cipher_spawn *spawn;
|
||||
struct crypto_alg *alg;
|
||||
unsigned long alignmask;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -203,7 +204,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
spawn = shash_instance_ctx(inst);
|
||||
|
||||
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
|
||||
crypto_attr_alg_name(tb[1]), 0, 0);
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
alg = crypto_spawn_cipher_alg(spawn);
|
||||
|
crypto/xts.c (154 lines changed)
@ -20,7 +20,7 @@
|
||||
#include <crypto/b128ops.h>
|
||||
#include <crypto/gf128mul.h>
|
||||
|
||||
struct priv {
|
||||
struct xts_tfm_ctx {
|
||||
struct crypto_skcipher *child;
|
||||
struct crypto_cipher *tweak;
|
||||
};
|
||||
@ -30,17 +30,17 @@ struct xts_instance_ctx {
|
||||
char name[CRYPTO_MAX_ALG_NAME];
|
||||
};
|
||||
|
||||
struct rctx {
|
||||
struct xts_request_ctx {
|
||||
le128 t;
|
||||
struct scatterlist *tail;
|
||||
struct scatterlist sg[2];
|
||||
struct skcipher_request subreq;
|
||||
};
|
||||
|
||||
static int setkey(struct crypto_skcipher *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct priv *ctx = crypto_skcipher_ctx(parent);
|
||||
struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
|
||||
struct crypto_skcipher *child;
|
||||
struct crypto_cipher *tweak;
|
||||
int err;
|
||||
@ -78,9 +78,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
|
||||
* multiple calls to the 'ecb(..)' instance, which usually would be slower than
|
||||
* just doing the gf128mul_x_ble() calls again.
|
||||
*/
|
||||
static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
|
||||
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
|
||||
bool enc)
|
||||
{
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
|
||||
const int bs = XTS_BLOCK_SIZE;
|
||||
@ -128,23 +129,23 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int xor_tweak_pre(struct skcipher_request *req, bool enc)
|
||||
static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
|
||||
{
|
||||
return xor_tweak(req, false, enc);
|
||||
return xts_xor_tweak(req, false, enc);
|
||||
}
|
||||
|
||||
static int xor_tweak_post(struct skcipher_request *req, bool enc)
|
||||
static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
|
||||
{
|
||||
return xor_tweak(req, true, enc);
|
||||
return xts_xor_tweak(req, true, enc);
|
||||
}
|
||||
|
||||
static void cts_done(struct crypto_async_request *areq, int err)
|
||||
static void xts_cts_done(struct crypto_async_request *areq, int err)
|
||||
{
|
||||
struct skcipher_request *req = areq->data;
|
||||
le128 b;
|
||||
|
||||
if (!err) {
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
|
||||
scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
|
||||
le128_xor(&b, &rctx->t, &b);
|
||||
@ -154,12 +155,13 @@ static void cts_done(struct crypto_async_request *areq, int err)
|
||||
skcipher_request_complete(req, err);
|
||||
}
|
||||
|
||||
static int cts_final(struct skcipher_request *req,
|
||||
int (*crypt)(struct skcipher_request *req))
|
||||
static int xts_cts_final(struct skcipher_request *req,
|
||||
int (*crypt)(struct skcipher_request *req))
|
||||
{
|
||||
struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
|
||||
const struct xts_tfm_ctx *ctx =
|
||||
crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
|
||||
int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct skcipher_request *subreq = &rctx->subreq;
|
||||
int tail = req->cryptlen % XTS_BLOCK_SIZE;
|
||||
le128 b[2];
|
||||
@ -169,7 +171,7 @@ static int cts_final(struct skcipher_request *req,
|
||||
offset - XTS_BLOCK_SIZE);
|
||||
|
||||
scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
|
||||
memcpy(b + 1, b, tail);
|
||||
b[1] = b[0];
|
||||
scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
|
||||
|
||||
le128_xor(b, &rctx->t, b);
|
||||
@ -177,7 +179,8 @@ static int cts_final(struct skcipher_request *req,
|
||||
scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
|
||||
|
||||
skcipher_request_set_tfm(subreq, ctx->child);
|
||||
skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
|
||||
skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
|
||||
req);
|
||||
skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
|
||||
XTS_BLOCK_SIZE, NULL);
|
||||
|
||||
@ -192,18 +195,18 @@ static int cts_final(struct skcipher_request *req,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void encrypt_done(struct crypto_async_request *areq, int err)
|
||||
static void xts_encrypt_done(struct crypto_async_request *areq, int err)
|
||||
{
|
||||
struct skcipher_request *req = areq->data;
|
||||
|
||||
if (!err) {
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
|
||||
rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = xor_tweak_post(req, true);
|
||||
err = xts_xor_tweak_post(req, true);
|
||||
|
||||
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
|
||||
err = cts_final(req, crypto_skcipher_encrypt);
|
||||
err = xts_cts_final(req, crypto_skcipher_encrypt);
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
}
|
||||
@ -212,18 +215,18 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
|
||||
skcipher_request_complete(req, err);
|
||||
}
|
||||
|
||||
static void decrypt_done(struct crypto_async_request *areq, int err)
|
||||
static void xts_decrypt_done(struct crypto_async_request *areq, int err)
|
||||
{
|
||||
struct skcipher_request *req = areq->data;
|
||||
|
||||
if (!err) {
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
|
||||
rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = xor_tweak_post(req, false);
|
||||
err = xts_xor_tweak_post(req, false);
|
||||
|
||||
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
|
||||
err = cts_final(req, crypto_skcipher_decrypt);
|
||||
err = xts_cts_final(req, crypto_skcipher_decrypt);
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
}
|
||||
@ -232,10 +235,12 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
|
||||
skcipher_request_complete(req, err);
|
||||
}
|
||||
|
||||
static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
|
||||
static int xts_init_crypt(struct skcipher_request *req,
|
||||
crypto_completion_t compl)
|
||||
{
|
||||
struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
const struct xts_tfm_ctx *ctx =
|
||||
crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
|
||||
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct skcipher_request *subreq = &rctx->subreq;
|
||||
|
||||
if (req->cryptlen < XTS_BLOCK_SIZE)
|
||||
@ -252,45 +257,45 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int encrypt(struct skcipher_request *req)
|
||||
static int xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct skcipher_request *subreq = &rctx->subreq;
|
||||
int err;
|
||||
|
||||
err = init_crypt(req, encrypt_done) ?:
|
||||
xor_tweak_pre(req, true) ?:
|
||||
err = xts_init_crypt(req, xts_encrypt_done) ?:
|
||||
xts_xor_tweak_pre(req, true) ?:
|
||||
crypto_skcipher_encrypt(subreq) ?:
|
||||
xor_tweak_post(req, true);
|
||||
xts_xor_tweak_post(req, true);
|
||||
|
||||
if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
|
||||
return err;
|
||||
|
||||
return cts_final(req, crypto_skcipher_encrypt);
|
||||
return xts_cts_final(req, crypto_skcipher_encrypt);
|
||||
}
|
||||
|
||||
static int decrypt(struct skcipher_request *req)
|
||||
static int xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct rctx *rctx = skcipher_request_ctx(req);
|
||||
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct skcipher_request *subreq = &rctx->subreq;
|
||||
int err;
|
||||
|
||||
err = init_crypt(req, decrypt_done) ?:
|
||||
xor_tweak_pre(req, false) ?:
|
||||
err = xts_init_crypt(req, xts_decrypt_done) ?:
|
||||
xts_xor_tweak_pre(req, false) ?:
|
||||
crypto_skcipher_decrypt(subreq) ?:
|
||||
xor_tweak_post(req, false);
|
||||
xts_xor_tweak_post(req, false);
|
||||
|
||||
if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
|
||||
return err;
|
||||
|
||||
return cts_final(req, crypto_skcipher_decrypt);
|
||||
return xts_cts_final(req, crypto_skcipher_decrypt);
|
||||
}
|
||||
|
||||
static int init_tfm(struct crypto_skcipher *tfm)
|
||||
static int xts_init_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
|
||||
struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
|
||||
struct priv *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct crypto_skcipher *child;
|
||||
struct crypto_cipher *tweak;
|
||||
|
||||
@ -309,41 +314,39 @@ static int init_tfm(struct crypto_skcipher *tfm)
|
||||
ctx->tweak = tweak;
|
||||
|
||||
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
|
||||
sizeof(struct rctx));
|
||||
sizeof(struct xts_request_ctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void exit_tfm(struct crypto_skcipher *tfm)
|
||||
static void xts_exit_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct priv *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
crypto_free_skcipher(ctx->child);
|
||||
crypto_free_cipher(ctx->tweak);
|
||||
}
|
||||
|
||||
static void crypto_xts_free(struct skcipher_instance *inst)
|
||||
static void xts_free_instance(struct skcipher_instance *inst)
|
||||
{
|
||||
crypto_drop_skcipher(skcipher_instance_ctx(inst));
|
||||
struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
|
||||
|
||||
crypto_drop_skcipher(&ictx->spawn);
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
struct skcipher_instance *inst;
|
||||
struct crypto_attr_type *algt;
|
||||
struct xts_instance_ctx *ctx;
|
||||
struct skcipher_alg *alg;
|
||||
const char *cipher_name;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
|
||||
return -EINVAL;
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(cipher_name))
|
||||
@ -355,10 +358,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
||||
ctx = skcipher_instance_ctx(inst);
|
||||
|
||||
mask = crypto_requires_off(algt->type, algt->mask,
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_ASYNC);
|
||||
|
||||
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
|
||||
cipher_name, 0, mask);
|
||||
if (err == -ENOENT) {
|
||||
@ -415,7 +414,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
} else
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
|
||||
@ -425,43 +423,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
|
||||
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
|
||||
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct priv);
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
|
||||
|
||||
inst->alg.init = init_tfm;
|
||||
inst->alg.exit = exit_tfm;
|
||||
inst->alg.init = xts_init_tfm;
|
||||
inst->alg.exit = xts_exit_tfm;
|
||||
|
||||
inst->alg.setkey = setkey;
|
||||
inst->alg.encrypt = encrypt;
|
||||
inst->alg.decrypt = decrypt;
|
||||
inst->alg.setkey = xts_setkey;
|
||||
inst->alg.encrypt = xts_encrypt;
|
||||
inst->alg.decrypt = xts_decrypt;
|
||||
|
||||
inst->free = crypto_xts_free;
|
||||
inst->free = xts_free_instance;
|
||||
|
||||
err = skcipher_register_instance(tmpl, inst);
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
crypto_xts_free(inst);
|
||||
xts_free_instance(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_tmpl = {
|
||||
static struct crypto_template xts_tmpl = {
|
||||
.name = "xts",
|
||||
.create = create,
|
||||
.create = xts_create,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init crypto_module_init(void)
|
||||
static int __init xts_module_init(void)
|
||||
{
|
||||
return crypto_register_template(&crypto_tmpl);
|
||||
return crypto_register_template(&xts_tmpl);
|
||||
}
|
||||
|
||||
static void __exit crypto_module_exit(void)
|
||||
static void __exit xts_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&crypto_tmpl);
|
||||
crypto_unregister_template(&xts_tmpl);
|
||||
}
|
||||
|
||||
subsys_initcall(crypto_module_init);
|
||||
module_exit(crypto_module_exit);
|
||||
subsys_initcall(xts_module_init);
|
||||
module_exit(xts_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("XTS block cipher mode");
|
||||
|
@@ -74,6 +74,16 @@ config HW_RANDOM_ATMEL

If unsure, say Y.

config HW_RANDOM_BA431
tristate "Silex Insight BA431 Random Number Generator support"
depends on HAS_IOMEM
help
This driver provides kernel-side support for the Random Number
Generator hardware based on Silex Insight BA431 IP.

To compile this driver as a module, choose M here: the
module will be called ba431-rng.

config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
@@ -245,7 +255,7 @@ config HW_RANDOM_MXC_RNGA
config HW_RANDOM_IMX_RNGC
tristate "Freescale i.MX RNGC Random Number Generator"
depends on HAS_IOMEM && HAVE_CLK
depends on SOC_IMX25 || COMPILE_TEST
depends on SOC_IMX25 || SOC_IMX6SL || SOC_IMX6SLL || SOC_IMX6UL || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -257,6 +267,21 @@ config HW_RANDOM_IMX_RNGC

If unsure, say Y.

config HW_RANDOM_INGENIC_RNG
tristate "Ingenic Random Number Generator support"
depends on HW_RANDOM
depends on MACH_JZ4780 || MACH_X1000
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number Generator
hardware found in ingenic JZ4780 and X1000 SoC. MIPS Creator CI20 uses
JZ4780 SoC, YSH & ATIL CU1000-Neo uses X1000 SoC.

To compile this driver as a module, choose M here: the
module will be called ingenic-rng.

If unsure, say Y.

config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK

@@ -9,6 +9,7 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
@@ -22,6 +23,7 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o

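Both hwrng drivers added below follow the same probe pattern: map the MMIO resource, fill in a struct hwrng with a read callback, and register it with the hwrng core. A condensed sketch of that pattern, with hypothetical "foo" names and an assumed single 32-bit data register at offset 0; the real drivers below add reset handling, polling and version selection on top of this:

#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct foo_rng {
	void __iomem *base;
	struct hwrng rng;
};

static int foo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct foo_rng *priv = container_of(rng, struct foo_rng, rng);

	if (max < sizeof(u32))
		return 0;

	/* Assumed register layout: one 32-bit data register at offset 0. */
	*(u32 *)buf = readl(priv->base);
	return sizeof(u32);
}

static int foo_rng_probe(struct platform_device *pdev)
{
	struct foo_rng *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->rng.name = pdev->name;
	priv->rng.read = foo_rng_read;

	/* The devm variant unregisters automatically when the device goes away. */
	return devm_hwrng_register(&pdev->dev, &priv->rng);
}

static struct platform_driver foo_rng_driver = {
	.probe = foo_rng_probe,
	.driver = {
		.name = "foo-rng",
	},
};
module_platform_driver(foo_rng_driver);

MODULE_LICENSE("GPL");
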
drivers/char/hw_random/ba431-rng.c (new file, 235 lines)
@ -0,0 +1,235 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (c) 2020 Silex Insight
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#define BA431_RESET_DELAY 1 /* usec */
|
||||
#define BA431_RESET_READ_STATUS_TIMEOUT 1000 /* usec */
|
||||
#define BA431_RESET_READ_STATUS_INTERVAL 10 /* usec */
|
||||
#define BA431_READ_RETRY_INTERVAL 1 /* usec */
|
||||
|
||||
#define BA431_REG_CTRL 0x00
|
||||
#define BA431_REG_FIFO_LEVEL 0x04
|
||||
#define BA431_REG_STATUS 0x30
|
||||
#define BA431_REG_FIFODATA 0x80
|
||||
|
||||
#define BA431_CTRL_ENABLE BIT(0)
|
||||
#define BA431_CTRL_SOFTRESET BIT(8)
|
||||
|
||||
#define BA431_STATUS_STATE_MASK (BIT(1) | BIT(2) | BIT(3))
|
||||
#define BA431_STATUS_STATE_OFFSET 1
|
||||
|
||||
enum ba431_state {
|
||||
BA431_STATE_RESET,
|
||||
BA431_STATE_STARTUP,
|
||||
BA431_STATE_FIFOFULLON,
|
||||
BA431_STATE_FIFOFULLOFF,
|
||||
BA431_STATE_RUNNING,
|
||||
BA431_STATE_ERROR
|
||||
};
|
||||
|
||||
struct ba431_trng {
|
||||
struct device *dev;
|
||||
void __iomem *base;
|
||||
struct hwrng rng;
|
||||
atomic_t reset_pending;
|
||||
struct work_struct reset_work;
|
||||
};
|
||||
|
||||
static inline u32 ba431_trng_read_reg(struct ba431_trng *ba431, u32 reg)
|
||||
{
|
||||
return ioread32(ba431->base + reg);
|
||||
}
|
||||
|
||||
static inline void ba431_trng_write_reg(struct ba431_trng *ba431, u32 reg,
|
||||
u32 val)
|
||||
{
|
||||
iowrite32(val, ba431->base + reg);
|
||||
}
|
||||
|
||||
static inline enum ba431_state ba431_trng_get_state(struct ba431_trng *ba431)
|
||||
{
|
||||
u32 status = ba431_trng_read_reg(ba431, BA431_REG_STATUS);
|
||||
|
||||
return (status & BA431_STATUS_STATE_MASK) >> BA431_STATUS_STATE_OFFSET;
|
||||
}
|
||||
|
||||
static int ba431_trng_is_in_error(struct ba431_trng *ba431)
|
||||
{
|
||||
enum ba431_state state = ba431_trng_get_state(ba431);
|
||||
|
||||
if ((state < BA431_STATE_STARTUP) ||
|
||||
(state >= BA431_STATE_ERROR))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ba431_trng_reset(struct ba431_trng *ba431)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* Disable interrupts, random generation and enable the softreset */
|
||||
ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_SOFTRESET);
|
||||
udelay(BA431_RESET_DELAY);
|
||||
ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_ENABLE);
|
||||
|
||||
/* Wait until the state changed */
|
||||
if (readx_poll_timeout(ba431_trng_is_in_error, ba431, ret, !ret,
|
||||
BA431_RESET_READ_STATUS_INTERVAL,
|
||||
BA431_RESET_READ_STATUS_TIMEOUT)) {
|
||||
dev_err(ba431->dev, "reset failed (state: %d)\n",
|
||||
ba431_trng_get_state(ba431));
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
dev_info(ba431->dev, "reset done\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ba431_trng_reset_work(struct work_struct *work)
|
||||
{
|
||||
struct ba431_trng *ba431 = container_of(work, struct ba431_trng,
|
||||
reset_work);
|
||||
ba431_trng_reset(ba431);
|
||||
atomic_set(&ba431->reset_pending, 0);
|
||||
}
|
||||
|
||||
static void ba431_trng_schedule_reset(struct ba431_trng *ba431)
|
||||
{
|
||||
if (atomic_cmpxchg(&ba431->reset_pending, 0, 1))
|
||||
return;
|
||||
|
||||
schedule_work(&ba431->reset_work);
|
||||
}
|
||||
|
||||
static int ba431_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
|
||||
{
|
||||
struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
|
||||
u32 *data = buf;
|
||||
unsigned int level, i;
|
||||
int n = 0;
|
||||
|
||||
while (max > 0) {
|
||||
level = ba431_trng_read_reg(ba431, BA431_REG_FIFO_LEVEL);
|
||||
if (!level) {
|
||||
if (ba431_trng_is_in_error(ba431)) {
|
||||
ba431_trng_schedule_reset(ba431);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!wait)
|
||||
break;
|
||||
|
||||
udelay(BA431_READ_RETRY_INTERVAL);
|
||||
continue;
|
||||
}
|
||||
|
||||
i = level;
|
||||
do {
|
||||
data[n++] = ba431_trng_read_reg(ba431,
|
||||
BA431_REG_FIFODATA);
|
||||
max -= sizeof(*data);
|
||||
} while (--i && (max > 0));
|
||||
|
||||
if (ba431_trng_is_in_error(ba431)) {
|
||||
n -= (level - i);
|
||||
ba431_trng_schedule_reset(ba431);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
n *= sizeof(data);
|
||||
return (n || !wait) ? n : -EIO;
|
||||
}
|
||||
|
||||
static void ba431_trng_cleanup(struct hwrng *rng)
|
||||
{
|
||||
struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
|
||||
|
||||
ba431_trng_write_reg(ba431, BA431_REG_CTRL, 0);
|
||||
cancel_work_sync(&ba431->reset_work);
|
||||
}
|
||||
|
||||
static int ba431_trng_init(struct hwrng *rng)
|
||||
{
|
||||
struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
|
||||
|
||||
return ba431_trng_reset(ba431);
|
||||
}
|
||||
|
||||
static int ba431_trng_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct ba431_trng *ba431;
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL);
|
||||
if (!ba431)
|
||||
return -ENOMEM;
|
||||
|
||||
ba431->dev = &pdev->dev;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
ba431->base = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(ba431->base))
|
||||
return PTR_ERR(ba431->base);
|
||||
|
||||
atomic_set(&ba431->reset_pending, 0);
|
||||
INIT_WORK(&ba431->reset_work, ba431_trng_reset_work);
|
||||
ba431->rng.name = pdev->name;
|
||||
ba431->rng.init = ba431_trng_init;
|
||||
ba431->rng.cleanup = ba431_trng_cleanup;
|
||||
ba431->rng.read = ba431_trng_read;
|
||||
|
||||
platform_set_drvdata(pdev, ba431);
|
||||
|
||||
ret = hwrng_register(&ba431->rng);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev_info(&pdev->dev, "BA431 TRNG registered\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ba431_trng_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct ba431_trng *ba431 = platform_get_drvdata(pdev);
|
||||
|
||||
hwrng_unregister(&ba431->rng);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id ba431_trng_dt_ids[] = {
|
||||
{ .compatible = "silex-insight,ba431-rng", .data = NULL },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids);
|
||||
|
||||
static struct platform_driver ba431_trng_driver = {
|
||||
.driver = {
|
||||
.name = "ba431-rng",
|
||||
.of_match_table = ba431_trng_dt_ids,
|
||||
},
|
||||
.probe = ba431_trng_probe,
|
||||
.remove = ba431_trng_remove,
|
||||
};
|
||||
|
||||
module_platform_driver(ba431_trng_driver);
|
||||
|
||||
MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
|
||||
MODULE_DESCRIPTION("TRNG driver for Silex Insight BA431");
|
||||
MODULE_LICENSE("GPL");
|
@ -139,7 +139,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct bcm2835_rng_of_data *of_data;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
const struct of_device_id *rng_id;
|
||||
struct bcm2835_rng_priv *priv;
|
||||
int err;
|
||||
@ -166,7 +165,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
|
||||
priv->rng.cleanup = bcm2835_rng_cleanup;
|
||||
|
||||
if (dev_of_node(dev)) {
|
||||
rng_id = of_match_node(bcm2835_rng_of_match, np);
|
||||
rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node);
|
||||
if (!rng_id)
|
||||
return -EINVAL;
|
||||
|
||||
@ -188,7 +187,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
|
||||
|
||||
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
|
||||
|
||||
static struct platform_device_id bcm2835_rng_devtype[] = {
|
||||
static const struct platform_device_id bcm2835_rng_devtype[] = {
|
||||
{ .name = "bcm2835-rng" },
|
||||
{ .name = "bcm63xx-rng" },
|
||||
{ /* sentinel */ }
|
||||
|
@@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

static int __init hwrng_modinit(void)
{
int ret = -ENOMEM;
int ret;

/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);

@@ -99,7 +99,7 @@ static int hisi_rng_probe(struct platform_device *pdev)
return 0;
}

static const struct of_device_id hisi_rng_dt_ids[] = {
static const struct of_device_id hisi_rng_dt_ids[] __maybe_unused = {
{ .compatible = "hisilicon,hip04-rng" },
{ .compatible = "hisilicon,hip05-rng" },
{ }

drivers/char/hw_random/ingenic-rng.c (new file, 154 lines)
@ -0,0 +1,154 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Ingenic Random Number Generator driver
|
||||
* Copyright (c) 2017 PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
|
||||
* Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
|
||||
*/
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
/* RNG register offsets */
|
||||
#define RNG_REG_ERNG_OFFSET 0x0
|
||||
#define RNG_REG_RNG_OFFSET 0x4
|
||||
|
||||
/* bits within the ERND register */
|
||||
#define ERNG_READY BIT(31)
|
||||
#define ERNG_ENABLE BIT(0)
|
||||
|
||||
enum ingenic_rng_version {
|
||||
ID_JZ4780,
|
||||
ID_X1000,
|
||||
};
|
||||
|
||||
/* Device associated memory */
|
||||
struct ingenic_rng {
|
||||
enum ingenic_rng_version version;
|
||||
|
||||
void __iomem *base;
|
||||
struct hwrng rng;
|
||||
};
|
||||
|
||||
static int ingenic_rng_init(struct hwrng *rng)
|
||||
{
|
||||
struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
|
||||
|
||||
writel(ERNG_ENABLE, priv->base + RNG_REG_ERNG_OFFSET);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ingenic_rng_cleanup(struct hwrng *rng)
|
||||
{
|
||||
struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
|
||||
|
||||
writel(0, priv->base + RNG_REG_ERNG_OFFSET);
|
||||
}
|
||||
|
||||
static int ingenic_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
|
||||
{
|
||||
struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
|
||||
u32 *data = buf;
|
||||
u32 status;
|
||||
int ret;
|
||||
|
||||
if (priv->version >= ID_X1000) {
|
||||
ret = readl_poll_timeout(priv->base + RNG_REG_ERNG_OFFSET, status,
|
||||
status & ERNG_READY, 10, 1000);
|
||||
if (ret == -ETIMEDOUT) {
|
||||
pr_err("%s: Wait for RNG data ready timeout\n", __func__);
|
||||
return ret;
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* A delay is required so that the current RNG data is not bit shifted
|
||||
* version of previous RNG data which could happen if random data is
|
||||
* read continuously from this device.
|
||||
*/
|
||||
udelay(20);
|
||||
}
|
||||
|
||||
*data = readl(priv->base + RNG_REG_RNG_OFFSET);
|
||||
|
||||
return 4;
|
||||
}
|
||||
|
||||
static int ingenic_rng_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct ingenic_rng *priv;
|
||||
int ret;
|
||||
|
||||
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
priv->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(priv->base)) {
|
||||
pr_err("%s: Failed to map RNG registers\n", __func__);
|
||||
ret = PTR_ERR(priv->base);
|
||||
goto err_free_rng;
|
||||
}
|
||||
|
||||
priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev);
|
||||
|
||||
priv->rng.name = pdev->name;
|
||||
priv->rng.init = ingenic_rng_init;
|
||||
priv->rng.cleanup = ingenic_rng_cleanup;
|
||||
priv->rng.read = ingenic_rng_read;
|
||||
|
||||
ret = hwrng_register(&priv->rng);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Failed to register hwrng\n");
|
||||
goto err_free_rng;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, priv);
|
||||
|
||||
dev_info(&pdev->dev, "Ingenic RNG driver registered\n");
|
||||
return 0;
|
||||
|
||||
err_free_rng:
|
||||
kfree(priv);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ingenic_rng_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct ingenic_rng *priv = platform_get_drvdata(pdev);
|
||||
|
||||
hwrng_unregister(&priv->rng);
|
||||
|
||||
writel(0, priv->base + RNG_REG_ERNG_OFFSET);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id ingenic_rng_of_match[] = {
|
||||
{ .compatible = "ingenic,jz4780-rng", .data = (void *) ID_JZ4780 },
|
||||
{ .compatible = "ingenic,x1000-rng", .data = (void *) ID_X1000 },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
|
||||
|
||||
static struct platform_driver ingenic_rng_driver = {
|
||||
.probe = ingenic_rng_probe,
|
||||
.remove = ingenic_rng_remove,
|
||||
.driver = {
|
||||
.name = "ingenic-rng",
|
||||
.of_match_table = ingenic_rng_of_match,
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(ingenic_rng_driver);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>");
|
||||
MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
|
||||
MODULE_DESCRIPTION("Ingenic Random Number Generator driver");
|
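Once a driver like the two above is bound and selected as the current source, the hwrng core exposes the hardware through /dev/hwrng. A small userspace example (illustrative only, not part of the patch set) that pulls a few bytes from it:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	for (ssize_t i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return 0;
}
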
@ -2,7 +2,7 @@
|
||||
/*
|
||||
* Random Number Generator driver for the Keystone SOC
|
||||
*
|
||||
* Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com
|
||||
* Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com
|
||||
*
|
||||
* Authors: Sandeep Nair
|
||||
* Vitaly Andrianov
|
||||
|
@ -76,7 +76,7 @@ static int nmk_rng_remove(struct amba_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct amba_id nmk_rng_ids[] = {
|
||||
static const struct amba_id nmk_rng_ids[] = {
|
||||
{
|
||||
.id = 0x000805e1,
|
||||
.mask = 0x000fffff, /* top bits are rev and cfg: accept all */
|
||||
|
@ -161,7 +161,7 @@ static const struct dev_pm_ops npcm_rng_pm_ops = {
|
||||
pm_runtime_force_resume)
|
||||
};
|
||||
|
||||
static const struct of_device_id rng_dt_id[] = {
|
||||
static const struct of_device_id rng_dt_id[] __maybe_unused = {
|
||||
{ .compatible = "nuvoton,npcm750-rng", },
|
||||
{},
|
||||
};
|
||||
|
@ -33,7 +33,7 @@ static int octeon_rng_init(struct hwrng *rng)
|
||||
ctl.u64 = 0;
|
||||
ctl.s.ent_en = 1; /* Enable the entropy source. */
|
||||
ctl.s.rng_en = 1; /* Enable the RNG hardware. */
|
||||
cvmx_write_csr((u64)p->control_status, ctl.u64);
|
||||
cvmx_write_csr((__force u64)p->control_status, ctl.u64);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -44,14 +44,14 @@ static void octeon_rng_cleanup(struct hwrng *rng)
|
||||
|
||||
ctl.u64 = 0;
|
||||
/* Disable everything. */
|
||||
cvmx_write_csr((u64)p->control_status, ctl.u64);
|
||||
cvmx_write_csr((__force u64)p->control_status, ctl.u64);
|
||||
}
|
||||
|
||||
static int octeon_rng_data_read(struct hwrng *rng, u32 *data)
|
||||
{
|
||||
struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
|
||||
|
||||
*data = cvmx_read64_uint32((u64)p->result);
|
||||
*data = cvmx_read64_uint32((__force u64)p->result);
|
||||
return sizeof(u32);
|
||||
}
|
||||
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/of.h>
|
||||
@ -243,7 +244,6 @@ static struct omap_rng_pdata omap2_rng_pdata = {
|
||||
.cleanup = omap2_rng_cleanup,
|
||||
};
|
||||
|
||||
#if defined(CONFIG_OF)
|
||||
static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
|
||||
{
|
||||
return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
|
||||
@ -358,7 +358,7 @@ static struct omap_rng_pdata eip76_rng_pdata = {
|
||||
.cleanup = omap4_rng_cleanup,
|
||||
};
|
||||
|
||||
static const struct of_device_id omap_rng_of_match[] = {
|
||||
static const struct of_device_id omap_rng_of_match[] __maybe_unused = {
|
||||
{
|
||||
.compatible = "ti,omap2-rng",
|
||||
.data = &omap2_rng_pdata,
|
||||
@ -418,13 +418,6 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng,
|
||||
struct platform_device *pdev)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng)
|
||||
{
|
||||
|
@ -119,7 +119,7 @@ static int pic32_rng_remove(struct platform_device *pdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id pic32_rng_of_match[] = {
|
||||
static const struct of_device_id pic32_rng_of_match[] __maybe_unused = {
|
||||
{ .compatible = "microchip,pic32mzda-rng", },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/delay.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
@ -121,7 +122,7 @@ static int st_rng_remove(struct platform_device *pdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id st_rng_match[] = {
|
||||
static const struct of_device_id st_rng_match[] __maybe_unused = {
|
||||
{ .compatible = "st,rng" },
|
||||
{},
|
||||
};
|
||||
|
@ -195,7 +195,7 @@ static int virtrng_restore(struct virtio_device *vdev)
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct virtio_device_id id_table[] = {
|
||||
static const struct virtio_device_id id_table[] = {
|
||||
{ VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
|
||||
{ 0 },
|
||||
};
|
||||
|
@ -624,6 +624,8 @@ config CRYPTO_DEV_QCE_SKCIPHER
|
||||
config CRYPTO_DEV_QCE_SHA
|
||||
bool
|
||||
depends on CRYPTO_DEV_QCE
|
||||
select CRYPTO_SHA1
|
||||
select CRYPTO_SHA256
|
||||
|
||||
choice
|
||||
prompt "Algorithms enabled for QCE acceleration"
|
||||
@ -756,10 +758,9 @@ config CRYPTO_DEV_ZYNQMP_AES
|
||||
config CRYPTO_DEV_MEDIATEK
|
||||
tristate "MediaTek's EIP97 Cryptographic Engine driver"
|
||||
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
|
||||
select CRYPTO_AES
|
||||
select CRYPTO_LIB_AES
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_CTR
|
||||
select CRYPTO_SHA1
|
||||
select CRYPTO_SHA256
|
||||
select CRYPTO_SHA512
|
||||
@ -865,4 +866,18 @@ source "drivers/crypto/hisilicon/Kconfig"
|
||||
|
||||
source "drivers/crypto/amlogic/Kconfig"
|
||||
|
||||
config CRYPTO_DEV_SA2UL
|
||||
tristate "Support for TI security accelerator"
|
||||
depends on ARCH_K3 || COMPILE_TEST
|
||||
select ARM64_CRYPTO
|
||||
select CRYPTO_AES
|
||||
select CRYPTO_AES_ARM64
|
||||
select CRYPTO_ALGAPI
|
||||
select HW_RANDOM
|
||||
select SG_SPLIT
|
||||
help
|
||||
K3 devices include a security accelerator engine that may be
|
||||
used for crypto offload. Select this if you want to use hardware
|
||||
acceleration for cryptographic algorithms on these devices.
|
||||
|
||||
endif # CRYPTO_HW
|
||||
|
@ -38,6 +38,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
|
||||
obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
|
||||
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
|
||||
obj-$(CONFIG_ARCH_STM32) += stm32/
|
||||
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
|
||||
|
@ -122,19 +122,17 @@ static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_requ
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
|
||||
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
|
||||
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
|
||||
int err;
|
||||
|
||||
skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
|
||||
skcipher_request_set_callback(subreq, areq->base.flags, NULL,
|
||||
NULL);
|
||||
skcipher_request_set_crypt(subreq, areq->src, areq->dst,
|
||||
skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
|
||||
skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
|
||||
areq->base.complete, areq->base.data);
|
||||
skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
|
||||
areq->cryptlen, areq->iv);
|
||||
if (ctx->mode & SS_DECRYPTION)
|
||||
err = crypto_skcipher_decrypt(subreq);
|
||||
err = crypto_skcipher_decrypt(&ctx->fallback_req);
|
||||
else
|
||||
err = crypto_skcipher_encrypt(subreq);
|
||||
skcipher_request_zero(subreq);
|
||||
err = crypto_skcipher_encrypt(&ctx->fallback_req);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -494,23 +492,25 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
|
||||
alg.crypto.base);
|
||||
op->ss = algt->ss;
|
||||
|
||||
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
|
||||
sizeof(struct sun4i_cipher_req_ctx));
|
||||
|
||||
op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
|
||||
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
|
||||
if (IS_ERR(op->fallback_tfm)) {
|
||||
dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
|
||||
name, PTR_ERR(op->fallback_tfm));
|
||||
return PTR_ERR(op->fallback_tfm);
|
||||
}
|
||||
|
||||
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
|
||||
sizeof(struct sun4i_cipher_req_ctx) +
|
||||
crypto_skcipher_reqsize(op->fallback_tfm));
|
||||
|
||||
|
||||
err = pm_runtime_get_sync(op->ss->dev);
|
||||
if (err < 0)
|
||||
goto error_pm;
|
||||
|
||||
return 0;
|
||||
error_pm:
|
||||
crypto_free_sync_skcipher(op->fallback_tfm);
|
||||
crypto_free_skcipher(op->fallback_tfm);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -518,7 +518,7 @@ void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_sync_skcipher(op->fallback_tfm);
|
||||
crypto_free_skcipher(op->fallback_tfm);
|
||||
pm_runtime_put(op->ss->dev);
|
||||
}
|
||||
|
||||
@ -546,10 +546,10 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
op->keylen = keylen;
|
||||
memcpy(op->key, key, keylen);
|
||||
|
||||
crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
|
||||
return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
|
||||
}
|
||||
|
||||
/* check and set the DES key, prepare the mode to be used */
|
||||
@ -566,10 +566,10 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
op->keylen = keylen;
|
||||
memcpy(op->key, key, keylen);
|
||||
|
||||
crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
|
||||
return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
|
||||
}
|
||||
|
||||
/* check and set the 3DES key, prepare the mode to be used */
|
||||
@ -586,9 +586,9 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
op->keylen = keylen;
|
||||
memcpy(op->key, key, keylen);
|
||||
|
||||
crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
|
||||
return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
|
||||
|
||||
}
|
||||
|
@ -170,11 +170,12 @@ struct sun4i_tfm_ctx {
|
||||
u32 keylen;
|
||||
u32 keymode;
|
||||
struct sun4i_ss_ctx *ss;
|
||||
struct crypto_sync_skcipher *fallback_tfm;
|
||||
struct crypto_skcipher *fallback_tfm;
|
||||
};
|
||||
|
||||
struct sun4i_cipher_req_ctx {
|
||||
u32 mode;
|
||||
struct skcipher_request fallback_req; // keep at the end
|
||||
};
|
||||
|
||||
struct sun4i_req_ctx {
|
||||
|
@ -58,23 +58,20 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
|
||||
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
|
||||
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
|
||||
struct sun8i_ce_alg_template *algt;
|
||||
#endif
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
|
||||
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
|
||||
algt->stat_fb++;
|
||||
#endif
|
||||
|
||||
skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
|
||||
skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
|
||||
skcipher_request_set_crypt(subreq, areq->src, areq->dst,
|
||||
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
|
||||
skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
|
||||
areq->base.complete, areq->base.data);
|
||||
skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
|
||||
areq->cryptlen, areq->iv);
|
||||
if (rctx->op_dir & CE_DECRYPTION)
|
||||
err = crypto_skcipher_decrypt(subreq);
|
||||
err = crypto_skcipher_decrypt(&rctx->fallback_req);
|
||||
else
|
||||
err = crypto_skcipher_encrypt(subreq);
|
||||
skcipher_request_zero(subreq);
|
||||
err = crypto_skcipher_encrypt(&rctx->fallback_req);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -335,18 +332,20 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
|
||||
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
|
||||
op->ce = algt->ce;
|
||||
|
||||
sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
|
||||
|
||||
op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
|
||||
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
|
||||
if (IS_ERR(op->fallback_tfm)) {
|
||||
dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
|
||||
name, PTR_ERR(op->fallback_tfm));
|
||||
return PTR_ERR(op->fallback_tfm);
|
||||
}
|
||||
|
||||
sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
|
||||
crypto_skcipher_reqsize(op->fallback_tfm);
|
||||
|
||||
|
||||
dev_info(op->ce->dev, "Fallback for %s is %s\n",
|
||||
crypto_tfm_alg_driver_name(&sktfm->base),
|
||||
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
|
||||
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
|
||||
|
||||
op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
|
||||
op->enginectx.op.prepare_request = NULL;
|
||||
@ -358,7 +357,8 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
|
||||
|
||||
return 0;
|
||||
error_pm:
|
||||
crypto_free_sync_skcipher(op->fallback_tfm);
|
||||
pm_runtime_put_noidle(op->ce->dev);
|
||||
crypto_free_skcipher(op->fallback_tfm);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -370,7 +370,7 @@ void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
|
||||
memzero_explicit(op->key, op->keylen);
|
||||
kfree(op->key);
|
||||
}
|
||||
crypto_free_sync_skcipher(op->fallback_tfm);
|
||||
crypto_free_skcipher(op->fallback_tfm);
|
||||
pm_runtime_put_sync_suspend(op->ce->dev);
|
||||
}
|
||||
|
||||
@ -400,10 +400,10 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
if (!op->key)
|
||||
return -ENOMEM;
|
||||
|
||||
crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
|
||||
return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
|
||||
}
|
||||
|
||||
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
@ -425,8 +425,8 @@ int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
if (!op->key)
|
||||
return -ENOMEM;
|
||||
|
||||
crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
|
||||
return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
|
||||
}
|
||||
|
@@ -185,7 +185,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
 		.cra_priority = 400,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			CRYPTO_ALG_NEED_FALLBACK,
 		.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_alignmask = 0xf,
@@ -211,7 +212,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
 		.cra_priority = 400,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			CRYPTO_ALG_NEED_FALLBACK,
 		.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_alignmask = 0xf,
@@ -236,7 +238,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
 		.cra_priority = 400,
 		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			CRYPTO_ALG_NEED_FALLBACK,
 		.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_alignmask = 0xf,
@@ -262,7 +265,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
 		.cra_priority = 400,
 		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			CRYPTO_ALG_NEED_FALLBACK,
 		.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_alignmask = 0xf,
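These hunks, and the matching ones for the other drivers further down, mark each offloaded algorithm with the new CRYPTO_ALG_ALLOCATES_MEMORY flag because the driver may allocate memory while handling a request. As I understand the flag's contract, a user that cannot tolerate per-request allocations (for example on a storage or memory-reclaim path) excludes such implementations by passing the flag in the lookup mask. A minimal consumer-side sketch, not part of this commit:

#include <crypto/skcipher.h>
#include <linux/err.h>

/*
 * Ask for an xts(aes) implementation that does not allocate memory per
 * request: type = 0 and mask = CRYPTO_ALG_ALLOCATES_MEMORY means "the
 * flag must be clear", so drivers marked as above are skipped.
 */
static struct crypto_skcipher *demo_alloc_io_safe_cipher(void)
{
	return crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
}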
@@ -181,12 +181,14 @@ struct sun8i_ce_dev {

 /*
  * struct sun8i_cipher_req_ctx - context for a skcipher request
- * @op_dir: direction (encrypt vs decrypt) for this request
- * @flow: the flow to use for this request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ * @fallback_req: request struct for invoking the fallback skcipher TFM
  */
 struct sun8i_cipher_req_ctx {
 	u32 op_dir;
 	int flow;
+	struct skcipher_request fallback_req;   // keep at the end
 };

 /*
@@ -202,7 +204,7 @@ struct sun8i_cipher_tfm_ctx {
 	u32 *key;
 	u32 keylen;
 	struct sun8i_ce_dev *ce;
-	struct crypto_sync_skcipher *fallback_tfm;
+	struct crypto_skcipher *fallback_tfm;
 };

 /*
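The "keep at the end" comment matters: the drivers size their skcipher request context as the size of their own request context plus crypto_skcipher_reqsize() of the fallback, so the trailing fallback_req member is what provides the variable-length tail that the fallback needs for its own request state. A rough sketch of that init-time accounting, reusing the hypothetical demo_cipher_tfm_ctx from the earlier sketch; names are illustrative and the code is not taken from this commit:

#include <crypto/internal/skcipher.h>
#include <linux/err.h>

/* Hypothetical request context: the fallback request must stay last. */
struct demo_cipher_req_ctx {
	u32 op_dir;
	int flow;
	struct skcipher_request fallback_req;	/* keep at the end */
};

static int demo_cipher_init(struct crypto_skcipher *sktfm)
{
	struct demo_cipher_tfm_ctx *op = crypto_skcipher_ctx(sktfm);
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(sktfm));

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm))
		return PTR_ERR(op->fallback_tfm);

	/* Reserve room for our context plus the fallback's own reqsize. */
	crypto_skcipher_set_reqsize(sktfm, sizeof(struct demo_cipher_req_ctx) +
					   crypto_skcipher_reqsize(op->fallback_tfm));
	return 0;
}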
@@ -73,7 +73,6 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
 	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
 	int err;

-	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct sun8i_ss_alg_template *algt;
@@ -81,15 +80,15 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
 	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
 	algt->stat_fb++;
 #endif
-	skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
-	skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
-	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+				      areq->base.complete, areq->base.data);
+	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
 				   areq->cryptlen, areq->iv);
 	if (rctx->op_dir & SS_DECRYPTION)
-		err = crypto_skcipher_decrypt(subreq);
+		err = crypto_skcipher_decrypt(&rctx->fallback_req);
 	else
-		err = crypto_skcipher_encrypt(subreq);
-	skcipher_request_zero(subreq);
+		err = crypto_skcipher_encrypt(&rctx->fallback_req);
 	return err;
 }

@@ -334,18 +333,20 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
 	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
 	op->ss = algt->ss;

-	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
-
-	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(op->fallback_tfm)) {
 		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
 			name, PTR_ERR(op->fallback_tfm));
 		return PTR_ERR(op->fallback_tfm);
 	}

+	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
+			 crypto_skcipher_reqsize(op->fallback_tfm);
+

 	dev_info(op->ss->dev, "Fallback for %s is %s\n",
 		 crypto_tfm_alg_driver_name(&sktfm->base),
-		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

 	op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
 	op->enginectx.op.prepare_request = NULL;
@@ -359,7 +360,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)

 	return 0;
 error_pm:
-	crypto_free_sync_skcipher(op->fallback_tfm);
+	crypto_free_skcipher(op->fallback_tfm);
 	return err;
 }

@@ -371,7 +372,7 @@ void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
 		memzero_explicit(op->key, op->keylen);
 		kfree(op->key);
 	}
-	crypto_free_sync_skcipher(op->fallback_tfm);
+	crypto_free_skcipher(op->fallback_tfm);
 	pm_runtime_put_sync(op->ss->dev);
 }

@@ -401,10 +402,10 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	if (!op->key)
 		return -ENOMEM;

-	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
-	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

-	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
 }

 int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -427,8 +428,8 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	if (!op->key)
 		return -ENOMEM;

-	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
-	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

-	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
 }
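The first hunk above is the core of the conversion: instead of building a synchronous request on the stack with SYNC_SKCIPHER_REQUEST_ON_STACK and completing it in place, the driver now fills the skcipher_request embedded at the tail of its request context and lets the fallback complete through the caller's own callback, so the fallback is allowed to be asynchronous itself. A condensed sketch of that forwarding pattern, reusing the hypothetical demo structures from the earlier sketches (not the driver's real code):

#include <crypto/internal/skcipher.h>

static int demo_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct demo_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct demo_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* Reuse the caller's flags, completion callback and src/dst/iv verbatim. */
	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);

	/* op_dir is a hypothetical direction flag, mirroring the drivers above. */
	if (rctx->op_dir)
		return crypto_skcipher_decrypt(&rctx->fallback_req);
	return crypto_skcipher_encrypt(&rctx->fallback_req);
}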
@@ -169,7 +169,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
 		.cra_priority = 400,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			CRYPTO_ALG_NEED_FALLBACK,
 		.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_alignmask = 0xf,
@@ -195,7 +196,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
 		.cra_priority = 400,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			CRYPTO_ALG_NEED_FALLBACK,
 		.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_alignmask = 0xf,
@@ -220,7 +222,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
 		.cra_priority = 400,
 		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			CRYPTO_ALG_NEED_FALLBACK,
 		.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_alignmask = 0xf,
@@ -246,7 +249,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
 		.cra_priority = 400,
 		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			CRYPTO_ALG_NEED_FALLBACK,
 		.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_alignmask = 0xf,
@@ -135,17 +135,18 @@ struct sun8i_ss_dev {

 /*
  * struct sun8i_cipher_req_ctx - context for a skcipher request
- * @t_src: list of mapped SGs with their size
- * @t_dst: list of mapped SGs with their size
- * @p_key: DMA address of the key
- * @p_iv: DMA address of the IV
- * @method: current algorithm for this request
- * @op_mode: op_mode for this request
- * @op_dir: direction (encrypt vs decrypt) for this request
- * @flow: the flow to use for this request
- * @ivlen: size of biv
- * @keylen: keylen for this request
- * @biv: buffer which contain the IV
+ * @t_src: list of mapped SGs with their size
+ * @t_dst: list of mapped SGs with their size
+ * @p_key: DMA address of the key
+ * @p_iv: DMA address of the IV
+ * @method: current algorithm for this request
+ * @op_mode: op_mode for this request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ * @ivlen: size of biv
+ * @keylen: keylen for this request
+ * @biv: buffer which contain the IV
+ * @fallback_req: request struct for invoking the fallback skcipher TFM
  */
 struct sun8i_cipher_req_ctx {
 	struct sginfo t_src[MAX_SG];
@@ -159,6 +160,7 @@ struct sun8i_cipher_req_ctx {
 	unsigned int ivlen;
 	unsigned int keylen;
 	void *biv;
+	struct skcipher_request fallback_req;   // keep at the end
 };

 /*
@@ -174,7 +176,7 @@ struct sun8i_cipher_tfm_ctx {
 	u32 *key;
 	u32 keylen;
 	struct sun8i_ss_dev *ss;
-	struct crypto_sync_skcipher *fallback_tfm;
+	struct crypto_skcipher *fallback_tfm;
 };

 /*
@@ -1,7 +1,7 @@
 config CRYPTO_DEV_AMLOGIC_GXL
 	tristate "Support for amlogic cryptographic offloader"
 	depends on HAS_IOMEM
-	default y if ARCH_MESON
+	default m if ARCH_MESON
 	select CRYPTO_SKCIPHER
 	select CRYPTO_ENGINE
 	select CRYPTO_ECB
@@ -64,22 +64,20 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq)
 #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct meson_alg_template *algt;
-#endif
-	SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm);

-#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
 	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
 	algt->stat_fb++;
 #endif
-	skcipher_request_set_sync_tfm(req, op->fallback_tfm);
-	skcipher_request_set_callback(req, areq->base.flags, NULL, NULL);
-	skcipher_request_set_crypt(req, areq->src, areq->dst,
+	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+				      areq->base.complete, areq->base.data);
+	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
 				   areq->cryptlen, areq->iv);

 	if (rctx->op_dir == MESON_DECRYPT)
-		err = crypto_skcipher_decrypt(req);
+		err = crypto_skcipher_decrypt(&rctx->fallback_req);
 	else
-		err = crypto_skcipher_encrypt(req);
-	skcipher_request_zero(req);
+		err = crypto_skcipher_encrypt(&rctx->fallback_req);
 	return err;
 }

@@ -321,15 +319,16 @@ int meson_cipher_init(struct crypto_tfm *tfm)
 	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
 	op->mc = algt->mc;

-	sktfm->reqsize = sizeof(struct meson_cipher_req_ctx);
-
-	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(op->fallback_tfm)) {
 		dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
 			name, PTR_ERR(op->fallback_tfm));
 		return PTR_ERR(op->fallback_tfm);
 	}

+	sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
+			 crypto_skcipher_reqsize(op->fallback_tfm);
+
 	op->enginectx.op.do_one_request = meson_handle_cipher_request;
 	op->enginectx.op.prepare_request = NULL;
 	op->enginectx.op.unprepare_request = NULL;
@@ -345,7 +344,7 @@ void meson_cipher_exit(struct crypto_tfm *tfm)
 		memzero_explicit(op->key, op->keylen);
 		kfree(op->key);
 	}
-	crypto_free_sync_skcipher(op->fallback_tfm);
+	crypto_free_skcipher(op->fallback_tfm);
 }

 int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -377,5 +376,5 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	if (!op->key)
 		return -ENOMEM;

-	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
 }
@@ -54,7 +54,8 @@ static struct meson_alg_template mc_algs[] = {
 		.cra_priority = 400,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			CRYPTO_ALG_NEED_FALLBACK,
 		.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_alignmask = 0xf,
@@ -79,7 +80,8 @@ static struct meson_alg_template mc_algs[] = {
 		.cra_priority = 400,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			CRYPTO_ALG_NEED_FALLBACK,
 		.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
 		.cra_module = THIS_MODULE,
 		.cra_alignmask = 0xf,
@@ -109,6 +109,7 @@ struct meson_dev {
 struct meson_cipher_req_ctx {
 	u32 op_dir;
 	int flow;
+	struct skcipher_request fallback_req;   // keep at the end
 };

 /*
@@ -126,7 +127,7 @@ struct meson_cipher_tfm_ctx {
 	u32 keylen;
 	u32 keymode;
 	struct meson_dev *mc;
-	struct crypto_sync_skcipher *fallback_tfm;
+	struct crypto_skcipher *fallback_tfm;
 };

 /*
@@ -2630,7 +2630,8 @@ static struct ahash_alg hash_algos[] = {
 		.cra_name = "sha1",
 		.cra_driver_name = "artpec-sha1",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = SHA1_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
 		.cra_alignmask = 3,
@@ -2653,7 +2654,8 @@ static struct ahash_alg hash_algos[] = {
 		.cra_name = "sha256",
 		.cra_driver_name = "artpec-sha256",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = SHA256_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
 		.cra_alignmask = 3,
@@ -2677,7 +2679,8 @@ static struct ahash_alg hash_algos[] = {
 		.cra_name = "hmac(sha256)",
 		.cra_driver_name = "artpec-hmac-sha256",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = SHA256_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
 		.cra_alignmask = 3,
@@ -2696,7 +2699,8 @@ static struct skcipher_alg crypto_algos[] = {
 		.cra_name = "ecb(aes)",
 		.cra_driver_name = "artpec6-ecb-aes",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
 		.cra_alignmask = 3,
@@ -2717,6 +2721,7 @@ static struct skcipher_alg crypto_algos[] = {
 		.cra_driver_name = "artpec6-ctr-aes",
 		.cra_priority = 300,
 		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY |
 			     CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
@@ -2738,7 +2743,8 @@ static struct skcipher_alg crypto_algos[] = {
 		.cra_name = "cbc(aes)",
 		.cra_driver_name = "artpec6-cbc-aes",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
 		.cra_alignmask = 3,
@@ -2759,7 +2765,8 @@ static struct skcipher_alg crypto_algos[] = {
 		.cra_name = "xts(aes)",
 		.cra_driver_name = "artpec6-xts-aes",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
 		.cra_alignmask = 3,
@@ -2790,6 +2797,7 @@ static struct aead_alg aead_algos[] = {
 		.cra_driver_name = "artpec-gcm-aes",
 		.cra_priority = 300,
 		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY |
 			     CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
@@ -3233,7 +3233,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(md5),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
 			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = AES_BLOCK_SIZE,
@@ -3256,7 +3258,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
 			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = AES_BLOCK_SIZE,
@@ -3279,7 +3283,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
 			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = AES_BLOCK_SIZE,
@@ -3302,7 +3308,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(md5),cbc(des))",
 			.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES_BLOCK_SIZE,
@@ -3325,7 +3333,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha1),cbc(des))",
 			.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES_BLOCK_SIZE,
@@ -3348,7 +3358,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha224),cbc(des))",
 			.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES_BLOCK_SIZE,
@@ -3371,7 +3383,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha256),cbc(des))",
 			.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES_BLOCK_SIZE,
@@ -3394,7 +3408,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha384),cbc(des))",
 			.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES_BLOCK_SIZE,
@@ -3417,7 +3433,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha512),cbc(des))",
 			.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES_BLOCK_SIZE,
@@ -3440,7 +3458,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3463,7 +3483,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3486,7 +3508,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3509,7 +3533,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3532,7 +3558,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3555,7 +3583,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		},
 		.setkey = aead_authenc_setkey,
 		.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3811,7 +3841,8 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "md5",
 			.cra_driver_name = "md5-iproc",
 			.cra_blocksize = MD5_BLOCK_WORDS * 4,
-			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY,
 		}
 	},
 	.cipher_info = {
@@ -4508,7 +4539,9 @@ static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
 	crypto->base.cra_priority = cipher_pri;
 	crypto->base.cra_alignmask = 0;
 	crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
-	crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+	crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
+				 CRYPTO_ALG_ALLOCATES_MEMORY |
+				 CRYPTO_ALG_KERN_DRIVER_ONLY;

 	crypto->init = skcipher_init_tfm;
 	crypto->exit = skcipher_exit_tfm;
@@ -4547,7 +4580,8 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
 	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
 	hash->halg.base.cra_init = ahash_cra_init;
 	hash->halg.base.cra_exit = generic_cra_exit;
-	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+				    CRYPTO_ALG_ALLOCATES_MEMORY;
 	hash->halg.statesize = sizeof(struct spu_hash_export_s);

 	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@@ -4591,7 +4625,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
 	aead->base.cra_alignmask = 0;
 	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);

-	aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
+	aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
 	/* setkey set in alg initialization */
 	aead->setauthsize = aead_setauthsize;
 	aead->encrypt = aead_encrypt;
@@ -810,12 +810,6 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
 }

-static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
-				const u8 *key, unsigned int keylen)
-{
-	return skcipher_setkey(skcipher, key, keylen, 0);
-}
-
 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
 			       const u8 *key, unsigned int keylen)
 {
@@ -838,7 +832,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	u32 *desc;

 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-		dev_err(jrdev, "key size mismatch\n");
+		dev_dbg(jrdev, "key size mismatch\n");
 		return -EINVAL;
 	}

@@ -1967,21 +1961,6 @@ static struct caam_skcipher_alg driver_algs[] = {
 		},
 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
 	},
-	{
-		.skcipher = {
-			.base = {
-				.cra_name = "ecb(arc4)",
-				.cra_driver_name = "ecb-arc4-caam",
-				.cra_blocksize = ARC4_BLOCK_SIZE,
-			},
-			.setkey = arc4_skcipher_setkey,
-			.encrypt = skcipher_encrypt,
-			.decrypt = skcipher_decrypt,
-			.min_keysize = ARC4_MIN_KEY_SIZE,
-			.max_keysize = ARC4_MAX_KEY_SIZE,
-		},
-		.caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
-	},
 };

 static struct caam_aead_alg driver_aeads[] = {
@@ -3433,7 +3412,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			      CRYPTO_ALG_KERN_DRIVER_ONLY;

 	alg->init = caam_cra_init;
 	alg->exit = caam_cra_exit;
@@ -3446,7 +3426,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			      CRYPTO_ALG_KERN_DRIVER_ONLY;

 	alg->init = caam_aead_init;
 	alg->exit = caam_aead_exit;
@@ -3457,7 +3438,6 @@ int caam_algapi_init(struct device *ctrldev)
 	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
 	int i = 0, err = 0;
 	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
-	u32 arc4_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false, gcm_support;

@@ -3477,8 +3457,6 @@ int caam_algapi_init(struct device *ctrldev)
 			   CHA_ID_LS_DES_SHIFT;
 		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
 		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
-		arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
-			    CHA_ID_LS_ARC4_SHIFT;
 		ccha_inst = 0;
 		ptha_inst = 0;

@@ -3499,7 +3477,6 @@ int caam_algapi_init(struct device *ctrldev)
 		md_inst = mdha & CHA_VER_NUM_MASK;
 		ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
 		ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
-		arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;

 		gcm_support = aesa & CHA_VER_MISC_AES_GCM;
 	}
@@ -3522,10 +3499,6 @@ int caam_algapi_init(struct device *ctrldev)
 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
 			continue;

-		/* Skip ARC4 algorithms if not supported by device */
-		if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
-			continue;
-
 		/*
 		 * Check support for AES modes not available
 		 * on LP devices.
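The xts_skcipher_setkey hunk above (and its caamalg_qi.c counterpart below) only demotes the log level: an XTS key is two AES keys back to back, so this driver accepts only 2 * 16 or 2 * 32 byte keys, and a wrong length is a user error rather than a driver fault, which is why it is now reported with dev_dbg() instead of dev_err(). A hypothetical stand-alone sketch of that check, not code from the driver:

#include <linux/device.h>
#include <crypto/aes.h>

/*
 * Accept only the XTS key sizes the hardware handles: two 128-bit or two
 * 256-bit AES keys.  Reject anything else quietly; the caller gets -EINVAL.
 */
static int demo_xts_check_keylen(struct device *dev, unsigned int keylen)
{
	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_dbg(dev, "key size mismatch\n");
		return -EINVAL;
	}
	return 0;
}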
@@ -728,7 +728,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	int ret = 0;

 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-		dev_err(jrdev, "key size mismatch\n");
+		dev_dbg(jrdev, "key size mismatch\n");
 		return -EINVAL;
 	}

@@ -2502,7 +2502,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			      CRYPTO_ALG_KERN_DRIVER_ONLY;

 	alg->init = caam_cra_init;
 	alg->exit = caam_cra_exit;
@@ -2515,7 +2516,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			      CRYPTO_ALG_KERN_DRIVER_ONLY;

 	alg->init = caam_aead_init;
 	alg->exit = caam_aead_exit;
Some files were not shown because too many files have changed in this diff.