Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:
   - Introduce crypto_shash_tfm_digest() and use it wherever possible.
   - Fix use-after-free and race in crypto_spawn_alg.
   - Add support for parallel and batch requests to crypto_engine.

  Algorithms:
   - Update jitter RNG for SP800-90B compliance.
   - Always use jitter RNG as seed in drbg.

  Drivers:
   - Add Arm CryptoCell driver cctrng.
   - Add support for SEV-ES to the PSP driver in ccp"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (114 commits)
  crypto: hisilicon - fix driver compatibility issue with different versions of devices
  crypto: engine - do not requeue in case of fatal error
  crypto: cavium/nitrox - Fix a typo in a comment
  crypto: hisilicon/qm - change debugfs file name from qm_regs to regs
  crypto: hisilicon/qm - add DebugFS for xQC and xQE dump
  crypto: hisilicon/zip - add debugfs for Hisilicon ZIP
  crypto: hisilicon/hpre - add debugfs for Hisilicon HPRE
  crypto: hisilicon/sec2 - add debugfs for Hisilicon SEC
  crypto: hisilicon/qm - add debugfs to the QM state machine
  crypto: hisilicon/qm - add debugfs for QM
  crypto: stm32/crc32 - protect from concurrent accesses
  crypto: stm32/crc32 - don't sleep in runtime pm
  crypto: stm32/crc32 - fix multi-instance
  crypto: stm32/crc32 - fix run-time self test issue.
  crypto: stm32/crc32 - fix ext4 chksum BUG_ON()
  crypto: hisilicon/zip - Use temporary sqe when doing work
  crypto: hisilicon - add device error report through abnormal irq
  crypto: hisilicon - remove codes of directly report device errors through MSI
  crypto: hisilicon - QM memory management optimization
  crypto: hisilicon - unify initial value assignment into QM
  ...
commit 81e8c10dac
Linus Torvalds, 2020-06-01 12:00:10 -07:00
127 changed files with 5021 additions and 1849 deletions


@ -33,7 +33,7 @@ Contact: linux-crypto@vger.kernel.org
Description: Dump debug registers from the HPRE.
Only available for PF.
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/qm_regs
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/regs
Date: Sep 2019
Contact: linux-crypto@vger.kernel.org
Description: Dump debug registers from the QM.
@ -44,14 +44,97 @@ What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/current_q
Date: Sep 2019
Contact: linux-crypto@vger.kernel.org
Description: One QM may contain multiple queues. Select specific queue to
show its debug registers in above qm_regs.
show its debug registers in above regs.
Only available for PF.
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/clear_enable
Date: Sep 2019
Contact: linux-crypto@vger.kernel.org
Description: QM debug registers(qm_regs) read clear control. 1 means enable
Description: QM debug registers(regs) read clear control. 1 means enable
register read clear, otherwise 0.
Writing to this file has no functional effect, only enable or
disable counters clear after reading of these registers.
Only available for PF.
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/err_irq
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of invalid interrupts for
QM task completion.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/aeq_irq
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of QM async event queue interrupts.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/abnormal_irq
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of interrupts for QM abnormal event.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/create_qp_err
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of queue allocation errors.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/mb_err
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of failed QM mailbox commands.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/status
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the status of the QM.
Four states: initiated, started, stopped and closed.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of sent requests.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/recv_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of received requests.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_busy_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of requests sent
with returning busy.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_fail_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of completed but error requests.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/invalid_req_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of invalid requests being received.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/overtime_thrhld
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Set the threshold time for counting the request which is
processed longer than the threshold.
0: disable(default), 1: 1 microsecond.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/over_thrhld_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of time out requests.
Available for both PF and VF, and take no other effect on HPRE.


@ -1,10 +1,4 @@
What: /sys/kernel/debug/hisi_sec/<bdf>/sec_dfx
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: Dump the debug registers of SEC cores.
Only available for PF.
What: /sys/kernel/debug/hisi_sec/<bdf>/clear_enable
What: /sys/kernel/debug/hisi_sec2/<bdf>/clear_enable
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: Enabling/disabling of clear action after reading
@ -12,7 +6,7 @@ Description: Enabling/disabling of clear action after reading
0: disable, 1: enable.
Only available for PF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec/<bdf>/current_qm
What: /sys/kernel/debug/hisi_sec2/<bdf>/current_qm
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: One SEC controller has one PF and multiple VFs, each function
@ -20,24 +14,100 @@ Description: One SEC controller has one PF and multiple VFs, each function
qm refers to.
Only available for PF.
What: /sys/kernel/debug/hisi_sec/<bdf>/qm/qm_regs
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/qm_regs
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: Dump of QM related debug registers.
Available for PF and VF in host. VF in guest currently only
has one debug register.
What: /sys/kernel/debug/hisi_sec/<bdf>/qm/current_q
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/current_q
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: One QM of SEC may contain multiple queues. Select specific
queue to show its debug registers in above 'qm_regs'.
queue to show its debug registers in above 'regs'.
Only available for PF.
What: /sys/kernel/debug/hisi_sec/<bdf>/qm/clear_enable
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/clear_enable
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
Description: Enabling/disabling of clear action after reading
the SEC's QM debug registers.
0: disable, 1: enable.
Only available for PF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/err_irq
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of invalid interrupts for
QM task completion.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/aeq_irq
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of QM async event queue interrupts.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/abnormal_irq
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of interrupts for QM abnormal event.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/create_qp_err
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of queue allocation errors.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/mb_err
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of failed QM mailbox commands.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/status
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the status of the QM.
Four states: initiated, started, stopped and closed.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/send_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of sent requests.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/recv_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of received requests.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/send_busy_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of requests sent with returning busy.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/err_bd_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of BD type error requests
to be received.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/invalid_req_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of invalid requests being received.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/done_flag_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of completed but marked error requests
to be received.
Available for both PF and VF, and take no other effect on SEC.


@ -26,7 +26,7 @@ Description: One ZIP controller has one PF and multiple VFs, each function
has a QM. Select the QM which below qm refers to.
Only available for PF.
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/qm_regs
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/regs
Date: Nov 2018
Contact: linux-crypto@vger.kernel.org
Description: Dump of QM related debug registers.
@ -37,14 +37,78 @@ What: /sys/kernel/debug/hisi_zip/<bdf>/qm/current_q
Date: Nov 2018
Contact: linux-crypto@vger.kernel.org
Description: One QM may contain multiple queues. Select specific queue to
show its debug registers in above qm_regs.
show its debug registers in above regs.
Only available for PF.
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/clear_enable
Date: Nov 2018
Contact: linux-crypto@vger.kernel.org
Description: QM debug registers(qm_regs) read clear control. 1 means enable
Description: QM debug registers(regs) read clear control. 1 means enable
register read clear, otherwise 0.
Writing to this file has no functional effect, only enable or
disable counters clear after reading of these registers.
Only available for PF.
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/err_irq
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of invalid interrupts for
QM task completion.
Available for both PF and VF, and take no other effect on ZIP.
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/aeq_irq
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of QM async event queue interrupts.
Available for both PF and VF, and take no other effect on ZIP.
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/abnormal_irq
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of interrupts for QM abnormal event.
Available for both PF and VF, and take no other effect on ZIP.
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/create_qp_err
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of queue allocation errors.
Available for both PF and VF, and take no other effect on ZIP.
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/mb_err
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the number of failed QM mailbox commands.
Available for both PF and VF, and take no other effect on ZIP.
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/status
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the status of the QM.
Four states: initiated, started, stopped and closed.
Available for both PF and VF, and take no other effect on ZIP.
What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/send_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of sent requests.
Available for both PF and VF, and take no other effect on ZIP.
What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/recv_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of received requests.
Available for both PF and VF, and take no other effect on ZIP.
What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/send_busy_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of requests received
with returning busy.
Available for both PF and VF, and take no other effect on ZIP.
What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/err_bd_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of BD type error requests
to be received.
Available for both PF and VF, and take no other effect on ZIP.


@ -0,0 +1,54 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/rng/arm-cctrng.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Arm TrustZone CryptoCell TRNG engine

maintainers:
  - Hadar Gat <hadar.gat@arm.com>

description: |+
  Arm TrustZone CryptoCell TRNG (True Random Number Generator) engine.

properties:
  compatible:
    enum:
      - arm,cryptocell-713-trng
      - arm,cryptocell-703-trng

  interrupts:
    maxItems: 1

  reg:
    maxItems: 1

  arm,rosc-ratio:
    description:
      Arm TrustZone CryptoCell TRNG engine has 4 ring oscillators.
      Sampling ratio values for these 4 ring oscillators. (from calibration)
    allOf:
      - $ref: /schemas/types.yaml#/definitions/uint32-array
      - items:
          maxItems: 4

  clocks:
    maxItems: 1

required:
  - compatible
  - interrupts
  - reg
  - arm,rosc-ratio

additionalProperties: false

examples:
  - |
    arm_cctrng: rng@60000000 {
        compatible = "arm,cryptocell-713-trng";
        interrupts = <0 29 4>;
        reg = <0x60000000 0x10000>;
        arm,rosc-ratio = <5000 1000 500 0>;
    };


@ -7,7 +7,7 @@ SipHash - a short input PRF
SipHash is a cryptographically secure PRF -- a keyed hash function -- that
performs very well for short inputs, hence the name. It was designed by
cryptographers Daniel J. Bernstein and Jean-Philippe Aumasson. It is intended
as a replacement for some uses of: `jhash`, `md5_transform`, `sha_transform`,
as a replacement for some uses of: `jhash`, `md5_transform`, `sha1_transform`,
and so forth.
SipHash takes a secret key filled with randomly generated numbers and either
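
A minimal sketch of the pattern described above (illustrative only: the
demo_* names are made up, while siphash_key_t, siphash() and
get_random_bytes() are the kernel interfaces from <linux/siphash.h> and
<linux/random.h>):

#include <linux/siphash.h>
#include <linux/random.h>

static siphash_key_t demo_key;

static void demo_init(void)
{
	/* A SipHash key is 128 bits of secret random data. */
	get_random_bytes(&demo_key, sizeof(demo_key));
}

static u64 demo_hash(const void *buf, size_t len)
{
	/* Keyed 64-bit PRF over an arbitrary buffer. */
	return siphash(buf, len, &demo_key);
}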


@ -3908,6 +3908,15 @@ S: Supported
W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
F: drivers/crypto/ccree/
CCTRNG ARM TRUSTZONE CRYPTOCELL TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER
M: Hadar Gat <hadar.gat@arm.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/char/hw_random/cctrng.c
F: drivers/char/hw_random/cctrng.h
F: Documentation/devicetree/bindings/rng/arm-cctrng.txt
W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
CEC FRAMEWORK
M: Hans Verkuil <hverkuil-cisco@xs4all.nl>
L: linux-media@vger.kernel.org


@ -14,7 +14,6 @@
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>


@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>


@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha.h>


@ -11,7 +11,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha.h>


@ -7,7 +7,6 @@
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>


@ -158,7 +158,6 @@ static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
unsigned int key_len)
{
struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
SHASH_DESC_ON_STACK(desc, ctx->hash);
u8 digest[SHA256_DIGEST_SIZE];
int ret;
@ -166,8 +165,7 @@ static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
if (ret)
return ret;
desc->tfm = ctx->hash;
crypto_shash_digest(desc, in_key, key_len, digest);
crypto_shash_tfm_digest(ctx->hash, in_key, key_len, digest);
return aes_expandkey(&ctx->key2, digest, sizeof(digest));
}


@ -66,7 +66,7 @@
#include <asm/assembler.h>
.text
.cpu generic+crypto
.arch armv8-a+crypto
init_crc .req w19
buf .req x20


@ -12,7 +12,6 @@
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>


@ -6,7 +6,6 @@
*/
#include <crypto/internal/hash.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha.h>


@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/string.h>
#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>


@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include <linux/cryptohash.h>
#include <asm/octeon/octeon.h>
#include <crypto/internal/hash.h>


@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/md5.h>
#include <asm/byteorder.h>


@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>


@ -16,14 +16,13 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
extern void powerpc_sha_transform(u32 *state, const u8 *src, u32 *temp);
void powerpc_sha_transform(u32 *state, const u8 *src);
static int sha1_init(struct shash_desc *desc)
static int powerpc_sha1_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
@ -34,8 +33,8 @@ static int sha1_init(struct shash_desc *desc)
return 0;
}
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial, done;
@ -47,7 +46,6 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
src = data;
if ((partial + len) > 63) {
u32 temp[SHA_WORKSPACE_WORDS];
if (partial) {
done = -partial;
@ -56,12 +54,11 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
}
do {
powerpc_sha_transform(sctx->state, src, temp);
powerpc_sha_transform(sctx->state, src);
done += 64;
src = data + done;
} while (done + 63 < len);
memzero_explicit(temp, sizeof(temp));
partial = 0;
}
memcpy(sctx->buffer + partial, src, len - done);
@ -71,7 +68,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
static int powerpc_sha1_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
@ -84,10 +81,10 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
/* Pad out to 56 mod 64 */
index = sctx->count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
sha1_update(desc, padding, padlen);
powerpc_sha1_update(desc, padding, padlen);
/* Append length */
sha1_update(desc, (const u8 *)&bits, sizeof(bits));
powerpc_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 5; i++)
@ -99,7 +96,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
return 0;
}
static int sha1_export(struct shash_desc *desc, void *out)
static int powerpc_sha1_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
@ -107,7 +104,7 @@ static int sha1_export(struct shash_desc *desc, void *out)
return 0;
}
static int sha1_import(struct shash_desc *desc, const void *in)
static int powerpc_sha1_import(struct shash_desc *desc, const void *in)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
@ -117,11 +114,11 @@ static int sha1_import(struct shash_desc *desc, const void *in)
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.update = sha1_update,
.final = sha1_final,
.export = sha1_export,
.import = sha1_import,
.init = powerpc_sha1_init,
.update = powerpc_sha1_update,
.final = powerpc_sha1_final,
.export = powerpc_sha1_export,
.import = powerpc_sha1_import,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {


@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>


@ -27,7 +27,7 @@
#include "sha.h"
static int sha1_init(struct shash_desc *desc)
static int s390_sha1_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
@ -42,7 +42,7 @@ static int sha1_init(struct shash_desc *desc)
return 0;
}
static int sha1_export(struct shash_desc *desc, void *out)
static int s390_sha1_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha1_state *octx = out;
@ -53,7 +53,7 @@ static int sha1_export(struct shash_desc *desc, void *out)
return 0;
}
static int sha1_import(struct shash_desc *desc, const void *in)
static int s390_sha1_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha1_state *ictx = in;
@ -67,11 +67,11 @@ static int sha1_import(struct shash_desc *desc, const void *in)
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.init = s390_sha1_init,
.update = s390_sha_update,
.final = s390_sha_final,
.export = sha1_export,
.import = sha1_import,
.export = s390_sha1_export,
.import = s390_sha1_import,
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha1_state),
.base = {


@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/md5.h>


@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>


@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>


@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>


@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>


@ -21,7 +21,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>


@ -34,7 +34,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>


@ -32,7 +32,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/sha.h>


@ -370,7 +370,6 @@ config CRYPTO_CFB
config CRYPTO_CTR
tristate "CTR support"
select CRYPTO_SKCIPHER
select CRYPTO_SEQIV
select CRYPTO_MANAGER
help
CTR: Counter mode
@ -1820,7 +1819,7 @@ config CRYPTO_DRBG_HASH
config CRYPTO_DRBG_CTR
bool "Enable CTR DRBG"
select CRYPTO_AES
depends on CRYPTO_CTR
select CRYPTO_CTR
help
Enable the CTR DRBG variant as defined in NIST SP800-90A.
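
(Kconfig aside: "select CRYPTO_CTR" now pulls CTR mode in automatically
whenever CTR DRBG is enabled, whereas the old "depends on CRYPTO_CTR"
merely hid the CTR DRBG option until CTR had been enabled by some other
means.)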


@ -403,7 +403,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
err = wait_for_completion_killable(&larval->completion);
WARN_ON(err);
if (!err)
crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval);
crypto_notify(CRYPTO_MSG_ALG_LOADED, larval);
out:
crypto_larval_kill(&larval->alg);
@ -716,17 +716,27 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn);
static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_alg *alg = ERR_PTR(-EAGAIN);
struct crypto_alg *target;
bool shoot = false;
down_read(&crypto_alg_sem);
alg = spawn->alg;
if (!spawn->dead && !crypto_mod_get(alg)) {
alg->cra_flags |= CRYPTO_ALG_DYING;
alg = NULL;
if (!spawn->dead) {
alg = spawn->alg;
if (!crypto_mod_get(alg)) {
target = crypto_alg_get(alg);
shoot = true;
alg = ERR_PTR(-EAGAIN);
}
}
up_read(&crypto_alg_sem);
return alg ?: ERR_PTR(-EAGAIN);
if (shoot) {
crypto_shoot_alg(target);
crypto_alg_put(target);
}
return alg;
}
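/*
 * (Annotation, not part of the hunk: crypto_shoot_alg() takes
 * crypto_alg_sem for writing, so it must not run inside the
 * down_read() section above; the "shoot" flag defers it until
 * after up_read().)
 */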
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
@ -904,6 +914,14 @@ out:
}
EXPORT_SYMBOL_GPL(crypto_enqueue_request);
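/*
 * (Annotation, not part of the hunk: unlike crypto_enqueue_request(),
 * which adds at the tail with list_add_tail() and enforces the backlog
 * limit, the helper below adds at the head and skips those checks, so
 * a requeued request keeps its place at the front of the queue.)
 */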
void crypto_enqueue_request_head(struct crypto_queue *queue,
struct crypto_async_request *request)
{
queue->qlen++;
list_add(&request->list, &queue->list);
}
EXPORT_SYMBOL_GPL(crypto_enqueue_request_head);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
{
struct list_head *request;


@ -61,7 +61,7 @@ static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct rng_ctx *ctx = ask->private;
int err = -EFAULT;
int err;
int genlen = 0;
u8 result[MAXSIZE];


@ -333,12 +333,13 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
return len;
}
static void crypto_shoot_alg(struct crypto_alg *alg)
void crypto_shoot_alg(struct crypto_alg *alg)
{
down_write(&crypto_alg_sem);
alg->cra_flags |= CRYPTO_ALG_DYING;
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask)


@ -129,7 +129,9 @@ static void blake2b_compress(struct blake2b_state *S,
ROUND(9);
ROUND(10);
ROUND(11);
#ifdef CONFIG_CC_IS_CLANG
#pragma nounroll /* https://bugs.llvm.org/show_bug.cgi?id=45803 */
#endif
for (i = 0; i < 8; ++i)
S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
}


@ -22,32 +22,36 @@
* @err: error number
*/
static void crypto_finalize_request(struct crypto_engine *engine,
struct crypto_async_request *req, int err)
struct crypto_async_request *req, int err)
{
unsigned long flags;
bool finalize_cur_req = false;
bool finalize_req = false;
int ret;
struct crypto_engine_ctx *enginectx;
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->cur_req == req)
finalize_cur_req = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
/*
* If hardware cannot enqueue more requests
* and retry mechanism is not supported
* make sure we are completing the current request
*/
if (!engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->cur_req == req) {
finalize_req = true;
engine->cur_req = NULL;
}
spin_unlock_irqrestore(&engine->queue_lock, flags);
}
if (finalize_cur_req) {
if (finalize_req || engine->retry_support) {
enginectx = crypto_tfm_ctx(req->tfm);
if (engine->cur_req_prepared &&
if (enginectx->op.prepare_request &&
enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine, req);
if (ret)
dev_err(engine->dev, "failed to unprepare request\n");
}
spin_lock_irqsave(&engine->queue_lock, flags);
engine->cur_req = NULL;
engine->cur_req_prepared = false;
spin_unlock_irqrestore(&engine->queue_lock, flags);
}
req->complete(req, err);
kthread_queue_work(engine->kworker, &engine->pump_requests);
@ -74,7 +78,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
spin_lock_irqsave(&engine->queue_lock, flags);
/* Make sure we are not already running a request */
if (engine->cur_req)
if (!engine->retry_support && engine->cur_req)
goto out;
/* If another context is idling then defer */
@ -108,13 +112,21 @@ static void crypto_pump_requests(struct crypto_engine *engine,
goto out;
}
start_request:
/* Get the fist request from the engine queue to handle */
backlog = crypto_get_backlog(&engine->queue);
async_req = crypto_dequeue_request(&engine->queue);
if (!async_req)
goto out;
engine->cur_req = async_req;
/*
* If hardware doesn't support the retry mechanism,
* keep track of the request we are processing now.
* We'll need it on completion (crypto_finalize_request).
*/
if (!engine->retry_support)
engine->cur_req = async_req;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@ -130,7 +142,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
ret = engine->prepare_crypt_hardware(engine);
if (ret) {
dev_err(engine->dev, "failed to prepare crypt hardware\n");
goto req_err;
goto req_err_2;
}
}
@ -141,28 +153,90 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (ret) {
dev_err(engine->dev, "failed to prepare request: %d\n",
ret);
goto req_err;
goto req_err_2;
}
engine->cur_req_prepared = true;
}
if (!enginectx->op.do_one_request) {
dev_err(engine->dev, "failed to do request\n");
ret = -EINVAL;
goto req_err;
goto req_err_1;
}
ret = enginectx->op.do_one_request(engine, async_req);
if (ret) {
dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
goto req_err;
}
return;
req_err:
crypto_finalize_request(engine, async_req, ret);
ret = enginectx->op.do_one_request(engine, async_req);
/* Request unsuccessfully executed by hardware */
if (ret < 0) {
/*
* If hardware queue is full (-ENOSPC), requeue request
* regardless of backlog flag.
* Otherwise, unprepare and complete the request.
*/
if (!engine->retry_support ||
(ret != -ENOSPC)) {
dev_err(engine->dev,
"Failed to do one request from queue: %d\n",
ret);
goto req_err_1;
}
/*
* If retry mechanism is supported,
* unprepare current request and
* enqueue it back into crypto-engine queue.
*/
if (enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine,
async_req);
if (ret)
dev_err(engine->dev,
"failed to unprepare request\n");
}
spin_lock_irqsave(&engine->queue_lock, flags);
/*
* If hardware was unable to execute request, enqueue it
* back in front of crypto-engine queue, to keep the order
* of requests.
*/
crypto_enqueue_request_head(&engine->queue, async_req);
kthread_queue_work(engine->kworker, &engine->pump_requests);
goto out;
}
goto retry;
req_err_1:
if (enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine, async_req);
if (ret)
dev_err(engine->dev, "failed to unprepare request\n");
}
req_err_2:
async_req->complete(async_req, ret);
retry:
/* If retry mechanism is supported, send new requests to engine */
if (engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);
goto start_request;
}
return;
out:
spin_unlock_irqrestore(&engine->queue_lock, flags);
/*
* Batch requests is possible only if
* hardware can enqueue multiple requests
*/
if (engine->do_batch_requests) {
ret = engine->do_batch_requests(engine);
if (ret)
dev_err(engine->dev, "failed to do batch requests: %d\n",
ret);
}
return;
}
static void crypto_pump_work(struct kthread_work *work)
@ -386,15 +460,27 @@ int crypto_engine_stop(struct crypto_engine *engine)
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
* crypto_engine_alloc_init - allocate crypto hardware engine structure and
* initialize it.
* crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
* and initialize it by setting the maximum number of entries in the software
* crypto-engine queue.
* @dev: the device attached with one hardware engine
* @retry_support: whether hardware has support for retry mechanism
@cbk_do_batch: pointer to a callback function to be invoked when executing
a batch of requests.
* This has the form:
* callback(struct crypto_engine *engine)
* where:
* @engine: the crypto engine structure.
* @rt: whether this queue is set to run as a realtime task
* @qlen: maximum size of the crypto-engine queue
*
* This must be called from context that can sleep.
* Return: the crypto engine structure on success, else NULL.
*/
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
struct crypto_engine *engine;
@ -411,12 +497,18 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
engine->running = false;
engine->busy = false;
engine->idling = false;
engine->cur_req_prepared = false;
engine->retry_support = retry_support;
engine->priv_data = dev;
/*
* Batch requests is possible only if
* hardware has support for retry mechanism.
*/
engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
snprintf(engine->name, sizeof(engine->name),
"%s-engine", dev_name(dev));
crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
crypto_init_queue(&engine->queue, qlen);
spin_lock_init(&engine->queue_lock);
engine->kworker = kthread_create_worker(0, "%s", engine->name);
@ -433,6 +525,22 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
/**
* crypto_engine_alloc_init - allocate crypto hardware engine structure and
* initialize it.
* @dev: the device attached with one hardware engine
* @rt: whether this queue is set to run as a realtime task
*
* This must be called from context that can sleep.
* Return: the crypto engine structure on success, else NULL.
*/
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
/**
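
A short usage sketch of the new constructor (hypothetical driver code:
my_do_batch(), my_hw_flush() and MY_QLEN are illustrative names, not
part of this commit):

/* Called from the engine's pump path once the queue has been drained;
 * kick the hardware a single time for everything batched so far. */
static int my_do_batch(struct crypto_engine *engine)
{
	return my_hw_flush(engine->priv_data);
}

static int my_engine_setup(struct device *dev)
{
	struct crypto_engine *engine;

	engine = crypto_engine_alloc_init_and_set(dev,
						  true,	/* hw reports -ENOSPC; engine requeues */
						  my_do_batch,
						  false,	/* no realtime kworker */
						  MY_QLEN);
	if (!engine)
		return -ENOMEM;

	return crypto_engine_start(engine);
}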


@ -1087,10 +1087,6 @@ static void drbg_async_seed(struct work_struct *work)
if (ret)
goto unlock;
/* If nonblocking pool is initialized, deactivate Jitter RNG */
crypto_free_rng(drbg->jent);
drbg->jent = NULL;
/* Set seeded to false so that if __drbg_seed fails the
* next generate call will trigger a reseed.
*/
@ -1168,7 +1164,23 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
entropylen);
if (ret) {
pr_devel("DRBG: jent failed with %d\n", ret);
goto out;
/*
* Do not treat the transient failure of the
* Jitter RNG as an error that needs to be
* reported. The combined number of the
* maximum reseed threshold times the maximum
* number of Jitter RNG transient errors is
* less than the reseed threshold required by
* SP800-90A allowing us to treat the
* transient errors as such.
*
* However, we mandate that at least the first
* seeding operation must succeed with the
* Jitter RNG.
*/
if (!reseed || ret != -EAGAIN)
goto out;
}
drbg_string_fill(&data1, entropy, entropylen * 2);
@ -1294,8 +1306,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags),
GFP_KERNEL);
if (!drbg->prev)
if (!drbg->prev) {
ret = -ENOMEM;
goto fini;
}
drbg->fips_primed = false;
}
@ -1492,6 +1506,8 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
if (list_empty(&drbg->test_data.list))
return 0;
drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
INIT_WORK(&drbg->seed_work, drbg_async_seed);
drbg->random_ready.owner = THIS_MODULE;
@ -1512,8 +1528,6 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
return err;
}
drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
/*
* Require frequent reseeds until the seed source is fully
* initialized.


@ -66,7 +66,6 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
struct essiv_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
SHASH_DESC_ON_STACK(desc, tctx->hash);
u8 salt[HASH_MAX_DIGESTSIZE];
int err;
@ -78,8 +77,7 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm,
if (err)
return err;
desc->tfm = tctx->hash;
err = crypto_shash_digest(desc, key, keylen, salt);
err = crypto_shash_tfm_digest(tctx->hash, key, keylen, salt);
if (err)
return err;


@ -65,6 +65,7 @@ void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg);
void crypto_remove_final(struct list_head *list);
void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg,


@ -108,6 +108,7 @@ void jent_get_nstime(__u64 *out)
struct jitterentropy {
spinlock_t jent_lock;
struct rand_data *entropy_collector;
unsigned int reset_cnt;
};
static int jent_kcapi_init(struct crypto_tfm *tfm)
@ -142,7 +143,33 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
int ret = 0;
spin_lock(&rng->jent_lock);
/* Return a permanent error in case we had too many resets in a row. */
if (rng->reset_cnt > (1<<10)) {
ret = -EFAULT;
goto out;
}
ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
/* Reset RNG in case of health failures */
if (ret < -1) {
pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n",
(ret == -2) ? "Repetition Count Test" :
"Adaptive Proportion Test");
rng->reset_cnt++;
ret = -EAGAIN;
} else {
rng->reset_cnt = 0;
/* Convert the Jitter RNG error into a usable error code */
if (ret == -1)
ret = -EINVAL;
}
out:
spin_unlock(&rng->jent_lock);
return ret;


@ -2,7 +2,7 @@
* Non-physical true random number generator based on timing jitter --
* Jitter RNG standalone code.
*
* Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2019
* Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2020
*
* Design
* ======
@ -47,7 +47,7 @@
/*
* This Jitterentropy RNG is based on the jitterentropy library
* version 2.1.2 provided at http://www.chronox.de/jent.html
* version 2.2.0 provided at http://www.chronox.de/jent.html
*/
#ifdef __OPTIMIZE__
@ -83,6 +83,22 @@ struct rand_data {
unsigned int memblocksize; /* Size of one memory block in bytes */
unsigned int memaccessloops; /* Number of memory accesses per random
* bit generation */
/* Repetition Count Test */
int rct_count; /* Number of stuck values */
/* Adaptive Proportion Test for a significance level of 2^-30 */
#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */
#define JENT_APT_LSB 16
#define JENT_APT_WORD_MASK (JENT_APT_LSB - 1)
unsigned int apt_observations; /* Number of collected observations */
unsigned int apt_count; /* APT counter */
unsigned int apt_base; /* APT base reference */
unsigned int apt_base_set:1; /* APT base reference set? */
unsigned int health_failure:1; /* Permanent health failure */
};
/* Flags that can be used to initialize the RNG */
@ -98,13 +114,202 @@ struct rand_data {
* variations (2nd derivation of time is
* zero). */
#define JENT_ESTUCK 8 /* Too many stuck results during init. */
/***************************************************************************
* Helper functions
***************************************************************************/
#define JENT_EHEALTH 9 /* Health test failed during initialization */
#define JENT_ERCT 10 /* RCT failed during initialization */
#include "jitterentropy.h"
/***************************************************************************
* Adaptive Proportion Test
*
* This test complies with SP800-90B section 4.4.2.
***************************************************************************/
/**
* Reset the APT counter
*
* @ec [in] Reference to entropy collector
*/
static void jent_apt_reset(struct rand_data *ec, unsigned int delta_masked)
{
/* Reset APT counter */
ec->apt_count = 0;
ec->apt_base = delta_masked;
ec->apt_observations = 0;
}
/**
* Insert a new entropy event into APT
*
* @ec [in] Reference to entropy collector
* @delta_masked [in] Masked time delta to process
*/
static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
{
/* Initialize the base reference */
if (!ec->apt_base_set) {
ec->apt_base = delta_masked;
ec->apt_base_set = 1;
return;
}
if (delta_masked == ec->apt_base) {
ec->apt_count++;
if (ec->apt_count >= JENT_APT_CUTOFF)
ec->health_failure = 1;
}
ec->apt_observations++;
if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
jent_apt_reset(ec, delta_masked);
}
/***************************************************************************
* Stuck Test and its use as Repetition Count Test
*
* The Jitter RNG uses an enhanced version of the Repetition Count Test
* (RCT) specified in SP800-90B section 4.4.1. Instead of counting identical
* back-to-back values, the input to the RCT is the counting of the stuck
* values during the generation of one Jitter RNG output block.
*
* The RCT is applied with an alpha of 2^{-30} compliant to FIPS 140-2 IG 9.8.
*
* During the counting operation, the Jitter RNG always calculates the RCT
* cut-off value of C. If that value exceeds the allowed cut-off value,
* the Jitter RNG output block will be calculated completely but discarded at
* the end. The caller of the Jitter RNG is informed with an error code.
***************************************************************************/
/**
* Repetition Count Test as defined in SP800-90B section 4.4.1
*
* @ec [in] Reference to entropy collector
* @stuck [in] Indicator whether the value is stuck
*/
static void jent_rct_insert(struct rand_data *ec, int stuck)
{
/*
* If we have a count less than zero, a previous RCT round identified
* a failure. We will not overwrite it.
*/
if (ec->rct_count < 0)
return;
if (stuck) {
ec->rct_count++;
/*
* The cutoff value is based on the following consideration:
* alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8.
* In addition, we require an entropy value H of 1/OSR as this
* is the minimum entropy required to provide full entropy.
* Note, we collect 64 * OSR deltas for inserting them into
* the entropy pool which should then have (close to) 64 bits
* of entropy.
*
* Note, ec->rct_count (which equals to value B in the pseudo
* code of SP800-90B section 4.4.1) starts with zero. Hence
* we need to subtract one from the cutoff value as calculated
* following SP800-90B.
*/
if ((unsigned int)ec->rct_count >= (31 * ec->osr)) {
ec->rct_count = -1;
ec->health_failure = 1;
}
} else {
ec->rct_count = 0;
}
}
/**
* Is there an RCT health test failure?
*
* @ec [in] Reference to entropy collector
*
* @return
* 0 No health test failure
* 1 Permanent health test failure
*/
static int jent_rct_failure(struct rand_data *ec)
{
if (ec->rct_count < 0)
return 1;
return 0;
}
static inline __u64 jent_delta(__u64 prev, __u64 next)
{
#define JENT_UINT64_MAX (__u64)(~((__u64) 0))
return (prev < next) ? (next - prev) :
(JENT_UINT64_MAX - prev + 1 + next);
}
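/*
 * (Worked check of the wraparound branch, not part of the diff: with
 * prev = 0xFFFFFFFFFFFFFFFE and next = 3, the result is
 * JENT_UINT64_MAX - prev + 1 + next = 1 + 1 + 3 = 5, exactly the five
 * timer ticks that elapsed across the 64-bit wrap.)
 */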
/**
* Stuck test by checking the:
* 1st derivative of the jitter measurement (time delta)
* 2nd derivative of the jitter measurement (delta of time deltas)
* 3rd derivative of the jitter measurement (delta of delta of time deltas)
*
* All values must always be non-zero.
*
* @ec [in] Reference to entropy collector
* @current_delta [in] Jitter time delta
*
* @return
* 0 jitter measurement not stuck (good bit)
* 1 jitter measurement stuck (reject bit)
*/
static int jent_stuck(struct rand_data *ec, __u64 current_delta)
{
__u64 delta2 = jent_delta(ec->last_delta, current_delta);
__u64 delta3 = jent_delta(ec->last_delta2, delta2);
unsigned int delta_masked = current_delta & JENT_APT_WORD_MASK;
ec->last_delta = current_delta;
ec->last_delta2 = delta2;
/*
* Insert the result of the comparison of two back-to-back time
* deltas.
*/
jent_apt_insert(ec, delta_masked);
if (!current_delta || !delta2 || !delta3) {
/* RCT with a stuck bit */
jent_rct_insert(ec, 1);
return 1;
}
/* RCT with a non-stuck bit */
jent_rct_insert(ec, 0);
return 0;
}
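/*
 * (Example, not part of the diff: two identical deltas in a row make
 * delta2 wrap to zero, so the sample is flagged stuck and fed to the
 * RCT even though current_delta itself is non-zero.)
 */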
/**
* Report any health test failures
*
* @ec [in] Reference to entropy collector
*
* @return
* 0 No health test failure
* 1 Permanent health test failure
*/
static int jent_health_failure(struct rand_data *ec)
{
/* Test is only enabled in FIPS mode */
if (!jent_fips_enabled())
return 0;
return ec->health_failure;
}
/***************************************************************************
* Noise sources
***************************************************************************/
/**
* Update of the loop count used for the next round of
* an entropy collection.
@ -148,10 +353,6 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
return (shuffle + (1<<min));
}
/***************************************************************************
* Noise sources
***************************************************************************/
/**
* CPU Jitter noise source -- this is the noise source based on the CPU
* execution time jitter
@ -166,18 +367,19 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
* the CPU execution time jitter. Any change to the loop in this function
* implies that careful retesting must be done.
*
* Input:
* @ec entropy collector struct
* @time time stamp to be injected
* @loop_cnt if a value not equal to 0 is set, use the given value as number of
* loops to perform the folding
* @ec [in] entropy collector struct
* @time [in] time stamp to be injected
* @loop_cnt [in] if a value not equal to 0 is set, use the given value as
* number of loops to perform the folding
* @stuck [in] Is the time stamp identified as stuck?
*
* Output:
* updated ec->data
*
* @return Number of loops the folding operation is performed
*/
static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt)
static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt,
int stuck)
{
unsigned int i;
__u64 j = 0;
@ -220,9 +422,17 @@ static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt)
new ^= tmp;
}
}
ec->data = new;
return fold_loop_cnt;
/*
* If the time stamp is stuck, do not finally insert the value into
* the entropy pool. Although this operation should not do any harm
* even when the time stamp has no entropy, SP800-90B requires that
* any conditioning operation (SP800-90B considers the LFSR to be a
* conditioning operation) to have an identical amount of input
* data according to section 3.1.5.
*/
if (!stuck)
ec->data = new;
}
/**
@ -243,16 +453,13 @@ static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt)
* to reliably access either L3 or memory, the ec->mem memory must be quite
* large which is usually not desirable.
*
* Input:
* @ec Reference to the entropy collector with the memory access data -- if
* the reference to the memory block to be accessed is NULL, this noise
* source is disabled
* @loop_cnt if a value not equal to 0 is set, use the given value as number of
* loops to perform the folding
*
* @return Number of memory access operations
* @ec [in] Reference to the entropy collector with the memory access data -- if
* the reference to the memory block to be accessed is NULL, this noise
* source is disabled
* @loop_cnt [in] if a value not equal to 0 is set, use the given value
* number of loops to perform the LFSR
*/
static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
{
unsigned int wrap = 0;
__u64 i = 0;
@ -262,7 +469,7 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT);
if (NULL == ec || NULL == ec->mem)
return 0;
return;
wrap = ec->memblocksize * ec->memblocks;
/*
@ -288,43 +495,11 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
ec->memlocation = ec->memlocation + ec->memblocksize - 1;
ec->memlocation = ec->memlocation % wrap;
}
return i;
}
/***************************************************************************
* Start of entropy processing logic
***************************************************************************/
/**
* Stuck test by checking the:
* 1st derivation of the jitter measurement (time delta)
* 2nd derivation of the jitter measurement (delta of time deltas)
* 3rd derivation of the jitter measurement (delta of delta of time deltas)
*
* All values must always be non-zero.
*
* Input:
* @ec Reference to entropy collector
* @current_delta Jitter time delta
*
* @return
* 0 jitter measurement not stuck (good bit)
* 1 jitter measurement stuck (reject bit)
*/
static int jent_stuck(struct rand_data *ec, __u64 current_delta)
{
__s64 delta2 = ec->last_delta - current_delta;
__s64 delta3 = delta2 - ec->last_delta2;
ec->last_delta = current_delta;
ec->last_delta2 = delta2;
if (!current_delta || !delta2 || !delta3)
return 1;
return 0;
}
/**
* This is the heart of the entropy generation: calculate time deltas and
* use the CPU jitter in the time deltas. The jitter is injected into the
@ -334,8 +509,7 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta)
* of this function! This can be done by calling this function
* and not using its result.
*
* Input:
* @entropy_collector Reference to entropy collector
* @ec [in] Reference to entropy collector
*
* @return result of stuck test
*/
@ -343,6 +517,7 @@ static int jent_measure_jitter(struct rand_data *ec)
{
__u64 time = 0;
__u64 current_delta = 0;
int stuck;
/* Invoke one noise source before time measurement to add variations */
jent_memaccess(ec, 0);
@ -352,22 +527,23 @@ static int jent_measure_jitter(struct rand_data *ec)
* invocation to measure the timing variations
*/
jent_get_nstime(&time);
current_delta = time - ec->prev_time;
current_delta = jent_delta(ec->prev_time, time);
ec->prev_time = time;
/* Now call the next noise sources which also injects the data */
jent_lfsr_time(ec, current_delta, 0);
/* Check whether we have a stuck measurement. */
return jent_stuck(ec, current_delta);
stuck = jent_stuck(ec, current_delta);
/* Now call the next noise sources which also injects the data */
jent_lfsr_time(ec, current_delta, 0, stuck);
return stuck;
}
/**
* Generator of one 64 bit random number
* Function fills rand_data->data
*
* Input:
* @ec Reference to entropy collector
* @ec [in] Reference to entropy collector
*/
static void jent_gen_entropy(struct rand_data *ec)
{
@ -390,31 +566,6 @@ static void jent_gen_entropy(struct rand_data *ec)
}
}
/**
* The continuous test required by FIPS 140-2 -- the function automatically
* primes the test if needed.
*
* Return:
* returns normally if FIPS test passed
* panics the kernel if FIPS test failed
*/
static void jent_fips_test(struct rand_data *ec)
{
if (!jent_fips_enabled())
return;
/* prime the FIPS test */
if (!ec->old_data) {
ec->old_data = ec->data;
jent_gen_entropy(ec);
}
if (ec->data == ec->old_data)
jent_panic("jitterentropy: Duplicate output detected\n");
ec->old_data = ec->data;
}
/**
* Entry function: Obtain entropy for the caller.
*
@ -425,17 +576,18 @@ static void jent_fips_test(struct rand_data *ec)
* This function truncates the last 64 bit entropy value output to the exact
* size specified by the caller.
*
* Input:
* @ec Reference to entropy collector
* @data pointer to buffer for storing random data -- buffer must already
* exist
* @len size of the buffer, specifying also the requested number of random
* in bytes
* @ec [in] Reference to entropy collector
* @data [in] pointer to buffer for storing random data -- buffer must already
* exist
* @len [in] size of the buffer, specifying also the requested number of random
* in bytes
*
* @return 0 when request is fulfilled or an error
*
* The following error codes can occur:
* -1 entropy_collector is NULL
* -2 RCT failed
* -3 APT test failed
*/
int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len)
@ -449,7 +601,42 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int tocopy;
jent_gen_entropy(ec);
jent_fips_test(ec);
if (jent_health_failure(ec)) {
int ret;
if (jent_rct_failure(ec))
ret = -2;
else
ret = -3;
/*
* Re-initialize the noise source
*
* If the health test fails, the Jitter RNG remains
* in failure state and will return a health failure
* during next invocation.
*/
if (jent_entropy_init())
return ret;
/* Set APT to initial state */
jent_apt_reset(ec, 0);
ec->apt_base_set = 0;
/* Set RCT to initial state */
ec->rct_count = 0;
/* Re-enable Jitter RNG */
ec->health_failure = 0;
/*
* Return the health test failure status to the
* caller as the generated value is not appropriate.
*/
return ret;
}
if ((DATA_SIZE_BITS / 8) < len)
tocopy = (DATA_SIZE_BITS / 8);
else
@ -513,11 +700,15 @@ int jent_entropy_init(void)
int i;
__u64 delta_sum = 0;
__u64 old_delta = 0;
unsigned int nonstuck = 0;
int time_backwards = 0;
int count_mod = 0;
int count_stuck = 0;
struct rand_data ec = { 0 };
/* Required for RCT */
ec.osr = 1;
/* We could perform statistical tests here, but the problem is
* that we only have a few loop counts to do testing. These
* loop counts may show some slight skew and we produce
@ -539,8 +730,10 @@ int jent_entropy_init(void)
/*
* TESTLOOPCOUNT needs some loops to identify edge systems. 100 is
* definitely too little.
*
* SP800-90B requires at least 1024 initial test cycles.
*/
#define TESTLOOPCOUNT 300
#define TESTLOOPCOUNT 1024
#define CLEARCACHE 100
for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) {
__u64 time = 0;
@ -552,13 +745,13 @@ int jent_entropy_init(void)
/* Invoke core entropy collection logic */
jent_get_nstime(&time);
ec.prev_time = time;
jent_lfsr_time(&ec, time, 0);
jent_lfsr_time(&ec, time, 0, 0);
jent_get_nstime(&time2);
/* test whether timer works */
if (!time || !time2)
return JENT_ENOTIME;
delta = time2 - time;
delta = jent_delta(time, time2);
/*
* test whether timer is fine grained enough to provide
* delta even when called shortly after each other -- this
@ -581,6 +774,28 @@ int jent_entropy_init(void)
if (stuck)
count_stuck++;
else {
nonstuck++;
/*
* Ensure that the APT succeeded.
*
* With the check below that count_stuck must be less
* than 10% of the overall generated raw entropy values
* it is guaranteed that the APT is invoked at
* floor((TESTLOOPCOUNT * 0.9) / 64) == 14 times.
*/
if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) {
jent_apt_reset(&ec,
delta & JENT_APT_WORD_MASK);
if (jent_health_failure(&ec))
return JENT_EHEALTH;
}
}
/* Validate RCT */
if (jent_rct_failure(&ec))
return JENT_ERCT;
/* test whether we have an increasing timer */
if (!(time2 > time))


@ -287,7 +287,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
crypto_free_skcipher(ctx->child);
}
static void free_inst(struct skcipher_instance *inst)
static void crypto_lrw_free(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
@ -400,12 +400,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = encrypt;
inst->alg.decrypt = decrypt;
inst->free = free_inst;
inst->free = crypto_lrw_free;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
free_inst(inst);
crypto_lrw_free(inst);
}
return err;
}

View File

@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
@ -31,10 +30,10 @@ EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
int blocks)
{
u32 temp[SHA_WORKSPACE_WORDS];
u32 temp[SHA1_WORKSPACE_WORDS];
while (blocks--) {
sha_transform(sst->state, src, temp);
sha1_transform(sst->state, src, temp);
src += SHA1_BLOCK_SIZE;
}
memzero_explicit(temp, sizeof(temp));

View File

@ -35,27 +35,31 @@ EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
static int crypto_sha256_init(struct shash_desc *desc)
{
return sha256_init(shash_desc_ctx(desc));
sha256_init(shash_desc_ctx(desc));
return 0;
}
static int crypto_sha224_init(struct shash_desc *desc)
{
return sha224_init(shash_desc_ctx(desc));
sha224_init(shash_desc_ctx(desc));
return 0;
}
int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha256_update(shash_desc_ctx(desc), data, len);
sha256_update(shash_desc_ctx(desc), data, len);
return 0;
}
EXPORT_SYMBOL(crypto_sha256_update);
static int crypto_sha256_final(struct shash_desc *desc, u8 *out)
{
if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE)
return sha224_final(shash_desc_ctx(desc), out);
sha224_final(shash_desc_ctx(desc), out);
else
return sha256_final(shash_desc_ctx(desc), out);
sha256_final(shash_desc_ctx(desc), out);
return 0;
}
int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,

View File

@ -206,6 +206,22 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out)
{
SHASH_DESC_ON_STACK(desc, tfm);
int err;
desc->tfm = tfm;
err = crypto_shash_digest(desc, data, len, out);
shash_desc_zero(desc);
return err;
}
EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
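/*
 * Usage sketch for the new one-shot helper (the "sha256" algorithm and
 * the buffer names here are illustrative, not taken from this patch);
 * it replaces the SHASH_DESC_ON_STACK boilerplate when hashing a flat
 * buffer with a single call:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_shash_tfm_digest(tfm, data, len, digest);
 *	crypto_free_shash(tfm);
 *	return err;
 */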
static int shash_default_export(struct shash_desc *desc, void *out)
{
memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));

View File

@ -322,7 +322,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
crypto_free_cipher(ctx->tweak);
}
static void free_inst(struct skcipher_instance *inst)
static void crypto_xts_free(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
@ -434,12 +434,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = encrypt;
inst->alg.decrypt = decrypt;
inst->free = free_inst;
inst->free = crypto_xts_free;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
free_inst(inst);
crypto_xts_free(inst);
}
return err;
}

View File

@ -474,6 +474,19 @@ config HW_RANDOM_KEYSTONE
help
This option enables Keystone's hardware random generator.
config HW_RANDOM_CCTRNG
tristate "Arm CryptoCell True Random Number Generator support"
depends on HAS_IOMEM && OF
help
Say 'Y' to enable the True Random Number Generator driver for the
Arm TrustZone CryptoCell family of processors.
Currently the CryptoCell 713 and 703 are supported.
The driver is supported only on SoCs where a Trusted Execution
Environment is not used.
Choose 'M' to compile this driver as a module. The module
will be called cctrng.
If unsure, say 'N'.
endif # HW_RANDOM
config UML_RANDOM

View File

@ -41,3 +41,4 @@ obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o

View File

@ -0,0 +1,735 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/workqueue.h>
#include <linux/circ_buf.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/fips.h>
#include "cctrng.h"
#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))
#define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \
(FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))
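/*
 * Expansion sketch: for the EHR_VALID field of RNG_ISR (bit shift 0x0,
 * bit size 0x1, defined in cctrng.h), CC_REG_FLD_GET(RNG_ISR, EHR_VALID,
 * reg_val) becomes FIELD_GET(GENMASK(0, 0), reg_val), which isolates
 * bit 0 of reg_val and shifts it down to the least significant position.
 */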
#define CC_HW_RESET_LOOP_COUNT 10
#define CC_TRNG_SUSPEND_TIMEOUT 3000
/* data circular buffer in words must be:
* - of a power-of-2 size (limitation of circ_buf.h macros)
* - at least 6, the size generated in the EHR according to HW implementation
*/
#define CCTRNG_DATA_BUF_WORDS 32
/* The timeout for the TRNG operation should be calculated with the formula:
* Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
* while:
* - SAMPLE_CNT is an input value from the characterisation process
* - all the rest are constants
*/
#define EHR_NUM 1
#define VN_COEFF 4
#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
#define SCALE_VALUE 2
#define CCTRNG_TIMEOUT(smpl_cnt) \
(EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
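/* Worked example (the sample count of 1000 is illustrative only):
 * with EHR_LENGTH = CC_TRNG_EHR_IN_BITS = 6 words * 32 bits = 192,
 * CCTRNG_TIMEOUT(1000) = 1 * 4 * 192 * 1000 * 2 = 1,536,000 cycles.
 */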
struct cctrng_drvdata {
struct platform_device *pdev;
void __iomem *cc_base;
struct clk *clk;
struct hwrng rng;
u32 active_rosc;
/* Sampling interval for each ring oscillator:
* count of ring oscillator cycles between consecutive bit samples.
* A value of 0 indicates a non-valid rosc.
*/
u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];
u32 data_buf[CCTRNG_DATA_BUF_WORDS];
struct circ_buf circ;
struct work_struct compwork;
struct work_struct startwork;
/* pending_hw - 1 when HW is pending, 0 when it is idle */
atomic_t pending_hw;
/* protects against multiple concurrent consumers of data_buf */
spinlock_t read_lock;
};
/* functions for write/read CC registers */
static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
{
iowrite32(val, (drvdata->cc_base + reg));
}
static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
{
return ioread32(drvdata->cc_base + reg);
}
static int cc_trng_pm_get(struct device *dev)
{
int rc = 0;
rc = pm_runtime_get_sync(dev);
/* pm_runtime_get_sync() can return 1 as a valid return code */
return (rc == 1 ? 0 : rc);
}
static void cc_trng_pm_put_suspend(struct device *dev)
{
int rc = 0;
pm_runtime_mark_last_busy(dev);
rc = pm_runtime_put_autosuspend(dev);
if (rc)
dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
}
static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
{
struct device *dev = &(drvdata->pdev->dev);
/* must be set before enabling runtime PM to avoid a redundant suspend */
pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
/* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
return pm_runtime_set_active(dev);
}
static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
{
struct device *dev = &(drvdata->pdev->dev);
/* enable the PM module */
pm_runtime_enable(dev);
}
static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
{
struct device *dev = &(drvdata->pdev->dev);
pm_runtime_disable(dev);
}
static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
{
struct device *dev = &(drvdata->pdev->dev);
struct device_node *np = drvdata->pdev->dev.of_node;
int rc;
int i;
/* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
int ret = -EINVAL;
rc = of_property_read_u32_array(np, "arm,rosc-ratio",
drvdata->smpl_ratio,
CC_TRNG_NUM_OF_ROSCS);
if (rc) {
/* arm,rosc-ratio was not found in device tree */
return rc;
}
/* verify that at least one rosc has (sampling ratio > 0) */
for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
dev_dbg(dev, "rosc %d sampling ratio %u",
i, drvdata->smpl_ratio[i]);
if (drvdata->smpl_ratio[i] > 0)
ret = 0;
}
return ret;
}
static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
{
struct device *dev = &(drvdata->pdev->dev);
dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
drvdata->active_rosc += 1;
while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
return 0;
drvdata->active_rosc += 1;
}
return -EINVAL;
}
static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
{
u32 max_cycles;
/* Set watchdog threshold to maximal allowed time (in CPU cycles) */
max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);
/* enable the RND source */
cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);
/* unmask RNG interrupts */
cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
}
/* increase circular data buffer index (head/tail) */
static inline void circ_idx_inc(int *idx, int bytes)
{
*idx += (bytes + 3) >> 2;
*idx &= (CCTRNG_DATA_BUF_WORDS - 1);
}
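/* Sketch of the arithmetic above: (bytes + 3) >> 2 rounds a byte count
 * up to whole 32-bit words, and the mask wraps at the power-of-2 buffer
 * size. E.g. with *idx == 30 and bytes == 10, the index advances
 * (10 + 3) >> 2 == 3 words and becomes (30 + 3) & 31 == 1.
 */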
static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
{
return CIRC_SPACE(drvdata->circ.head,
drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
}
static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
/* current implementation ignores "wait" */
struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
struct device *dev = &(drvdata->pdev->dev);
u32 *buf = (u32 *)drvdata->circ.buf;
size_t copied = 0;
size_t cnt_w;
size_t size;
size_t left;
if (!spin_trylock(&drvdata->read_lock)) {
/* concurrent consumers from data_buf cannot be served */
dev_dbg_ratelimited(dev, "unable to hold lock\n");
return 0;
}
/* copy till end of data buffer (without wrap back) */
cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
size = min((cnt_w<<2), max);
memcpy(data, &(buf[drvdata->circ.tail]), size);
copied = size;
circ_idx_inc(&drvdata->circ.tail, size);
/* copy rest of data in data buffer */
left = max - copied;
if (left > 0) {
cnt_w = CIRC_CNT(drvdata->circ.head,
drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
size = min((cnt_w<<2), left);
memcpy(data, &(buf[drvdata->circ.tail]), size);
copied += size;
circ_idx_inc(&drvdata->circ.tail, size);
}
spin_unlock(&drvdata->read_lock);
if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
/* re-check space in buffer to avoid potential race */
if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
/* increment device's usage counter */
int rc = cc_trng_pm_get(dev);
if (rc) {
dev_err(dev,
"cc_trng_pm_get returned %x\n",
rc);
return rc;
}
/* schedule execution of deferred work handler
* for filling of data buffer
*/
schedule_work(&drvdata->startwork);
} else {
atomic_set(&drvdata->pending_hw, 0);
}
}
}
return copied;
}
static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
{
u32 tmp_smpl_cnt = 0;
struct device *dev = &(drvdata->pdev->dev);
dev_dbg(dev, "cctrng hw trigger.\n");
/* enable the HW RND clock */
cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);
/* do software reset */
cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
/* in order to verify that the reset has completed,
* the sample count needs to be verified
*/
do {
/* enable the HW RND clock */
cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);
/* set sampling ratio (rng_clocks) between consecutive bits */
cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
drvdata->smpl_ratio[drvdata->active_rosc]);
/* read the sampling ratio */
tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);
} while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);
/* disable the RND source for setting new parameters in HW */
cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);
cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);
cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);
/* Debug Control register: set to 0 - no bypasses */
cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);
cc_trng_enable_rnd_source(drvdata);
}
static void cc_trng_compwork_handler(struct work_struct *w)
{
u32 isr = 0;
u32 ehr_valid = 0;
struct cctrng_drvdata *drvdata =
container_of(w, struct cctrng_drvdata, compwork);
struct device *dev = &(drvdata->pdev->dev);
int i;
/* stop DMA and the RNG source */
cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);
/* read RNG_ISR and check for errors */
isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);
if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
fips_fail_notify();
/* FIPS error is fatal */
panic("Got HW CRNGT error while fips is enabled!\n");
}
/* Clear all pending RNG interrupts */
cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);
if (!ehr_valid) {
/* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
dev_dbg(dev, "cctrng autocorr/timeout error.\n");
goto next_rosc;
}
/* in case of VN error, ignore it */
}
/* read EHR data from registers */
for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
/* calc word ptr in data_buf */
u32 *buf = (u32 *)drvdata->circ.buf;
buf[drvdata->circ.head] = cc_ioread(drvdata,
CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));
/* EHR_DATA registers are cleared on read. In case a 0 value was
* returned, restart the entropy collection.
*/
if (buf[drvdata->circ.head] == 0) {
dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
drvdata->active_rosc);
goto next_rosc;
}
circ_idx_inc(&drvdata->circ.head, 1<<2);
}
atomic_set(&drvdata->pending_hw, 0);
/* continue to fill data buffer if needed */
if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
/* Re-enable rnd source */
cc_trng_enable_rnd_source(drvdata);
return;
}
}
cc_trng_pm_put_suspend(dev);
dev_dbg(dev, "compwork handler done\n");
return;
next_rosc:
if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
(cc_trng_change_rosc(drvdata) == 0)) {
/* trigger trng hw with next rosc */
cc_trng_hw_trigger(drvdata);
} else {
atomic_set(&drvdata->pending_hw, 0);
cc_trng_pm_put_suspend(dev);
}
}
static irqreturn_t cc_isr(int irq, void *dev_id)
{
struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
struct device *dev = &(drvdata->pdev->dev);
u32 irr;
/* if the driver is suspended, return; it is probably a shared interrupt */
if (pm_runtime_suspended(dev))
return IRQ_NONE;
/* read the interrupt status */
irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
dev_dbg(dev, "Got IRR=0x%08X\n", irr);
if (irr == 0) /* Probably shared interrupt line */
return IRQ_NONE;
/* clear interrupt - must be before processing events */
cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);
/* RNG interrupt - most probable */
if (irr & CC_HOST_RNG_IRQ_MASK) {
/* Mask RNG interrupts - will be unmasked in deferred work */
cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);
/* We clear RNG interrupt here,
* to keep it from firing again once we unmask RNG interrupts.
*/
cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
CC_HOST_RNG_IRQ_MASK);
irr &= ~CC_HOST_RNG_IRQ_MASK;
/* schedule execution of deferred work handler */
schedule_work(&drvdata->compwork);
}
if (irr) {
dev_dbg_ratelimited(dev,
"IRR includes unknown cause bits (0x%08X)\n",
irr);
/* Just warning */
}
return IRQ_HANDLED;
}
static void cc_trng_startwork_handler(struct work_struct *w)
{
struct cctrng_drvdata *drvdata =
container_of(w, struct cctrng_drvdata, startwork);
drvdata->active_rosc = 0;
cc_trng_hw_trigger(drvdata);
}
static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
{
struct clk *clk;
struct device *dev = &(drvdata->pdev->dev);
int rc = 0;
clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
dev_err(dev, "Error getting clock: %pe\n", clk);
return PTR_ERR(clk);
}
drvdata->clk = clk;
rc = clk_prepare_enable(drvdata->clk);
if (rc) {
dev_err(dev, "Failed to enable clock\n");
return rc;
}
return 0;
}
static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
{
clk_disable_unprepare(drvdata->clk);
}
static int cctrng_probe(struct platform_device *pdev)
{
struct resource *req_mem_cc_regs = NULL;
struct cctrng_drvdata *drvdata;
struct device *dev = &pdev->dev;
int rc = 0;
u32 val;
int irq;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
if (!drvdata->rng.name)
return -ENOMEM;
drvdata->rng.read = cctrng_read;
drvdata->rng.priv = (unsigned long)drvdata;
drvdata->rng.quality = CC_TRNG_QUALITY;
platform_set_drvdata(pdev, drvdata);
drvdata->pdev = pdev;
drvdata->circ.buf = (char *)drvdata->data_buf;
/* Get device resources */
/* First CC registers space */
req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* Map registers space */
drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
if (IS_ERR(drvdata->cc_base)) {
dev_err(dev, "Failed to ioremap registers");
return PTR_ERR(drvdata->cc_base);
}
dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
req_mem_cc_regs);
dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
&req_mem_cc_regs->start, drvdata->cc_base);
/* Then IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "Failed getting IRQ resource\n");
return irq;
}
/* parse sampling rate from device tree */
rc = cc_trng_parse_sampling_ratio(drvdata);
if (rc) {
dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
return rc;
}
rc = cc_trng_clk_init(drvdata);
if (rc) {
dev_err(dev, "cc_trng_clk_init failed\n");
return rc;
}
INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
spin_lock_init(&drvdata->read_lock);
/* register the driver isr function */
rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
if (rc) {
dev_err(dev, "Could not register to interrupt %d\n", irq);
goto post_clk_err;
}
dev_dbg(dev, "Registered to IRQ: %d\n", irq);
/* Clear all pending interrupts */
val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
dev_dbg(dev, "IRR=0x%08X\n", val);
cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);
/* unmask HOST RNG interrupt */
cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
~CC_HOST_RNG_IRQ_MASK);
/* init PM */
rc = cc_trng_pm_init(drvdata);
if (rc) {
dev_err(dev, "cc_trng_pm_init failed\n");
goto post_clk_err;
}
/* increment device's usage counter */
rc = cc_trng_pm_get(dev);
if (rc) {
dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
goto post_pm_err;
}
/* set pending_hw to ensure that HW won't be triggered from a read */
atomic_set(&drvdata->pending_hw, 1);
/* registration of the hwrng device */
rc = hwrng_register(&drvdata->rng);
if (rc) {
dev_err(dev, "Could not register hwrng device.\n");
goto post_pm_err;
}
/* trigger HW to start generating data */
drvdata->active_rosc = 0;
cc_trng_hw_trigger(drvdata);
/* All set, we can allow auto-suspend */
cc_trng_pm_go(drvdata);
dev_info(dev, "ARM cctrng device initialized\n");
return 0;
post_pm_err:
cc_trng_pm_fini(drvdata);
post_clk_err:
cc_trng_clk_fini(drvdata);
return rc;
}
static int cctrng_remove(struct platform_device *pdev)
{
struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
dev_dbg(dev, "Releasing cctrng resources...\n");
hwrng_unregister(&drvdata->rng);
cc_trng_pm_fini(drvdata);
cc_trng_clk_fini(drvdata);
dev_info(dev, "ARM cctrng device terminated\n");
return 0;
}
static int __maybe_unused cctrng_suspend(struct device *dev)
{
struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
POWER_DOWN_ENABLE);
clk_disable_unprepare(drvdata->clk);
return 0;
}
static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
{
unsigned int val;
unsigned int i;
for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
/* in cc7x3, NVM_IS_IDLE indicates that the CC reset has
* completed and the device is fully functional
*/
val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
/* HW indicates reset completed */
return true;
}
/* allow scheduling another process on the processor */
schedule();
}
/* reset not completed */
return false;
}
static int __maybe_unused cctrng_resume(struct device *dev)
{
struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
int rc;
dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
/* Enable the device source clock */
rc = clk_prepare_enable(drvdata->clk);
if (rc) {
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
/* wait for Cryptocell reset completion */
if (!cctrng_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
return -EBUSY;
}
/* unmask HOST RNG interrupt */
cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
~CC_HOST_RNG_IRQ_MASK);
cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
POWER_DOWN_DISABLE);
return 0;
}
static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);
static const struct of_device_id arm_cctrng_dt_match[] = {
{ .compatible = "arm,cryptocell-713-trng", },
{ .compatible = "arm,cryptocell-703-trng", },
{},
};
MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);
static struct platform_driver cctrng_driver = {
.driver = {
.name = "cctrng",
.of_match_table = arm_cctrng_dt_match,
.pm = &cctrng_pm,
},
.probe = cctrng_probe,
.remove = cctrng_remove,
};
static int __init cctrng_mod_init(void)
{
/* Compile time assertion checks */
BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
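/* The second assertion is the standard power-of-2 test: for the current
 * CCTRNG_DATA_BUF_WORDS == 32, 32 & 31 is 0b100000 & 0b011111 == 0,
 * while any non-power-of-2 value shares a set bit with its predecessor
 * and fails the build.
 */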
return platform_driver_register(&cctrng_driver);
}
module_init(cctrng_mod_init);
static void __exit cctrng_mod_exit(void)
{
platform_driver_unregister(&cctrng_driver);
}
module_exit(cctrng_mod_exit);
/* Module description */
MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,72 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
#include <linux/bitops.h>
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
/* hwrng quality: bits of true entropy per 1024 bits of input */
#define CC_TRNG_QUALITY 1024
/* CryptoCell TRNG HW definitions */
#define CC_TRNG_NUM_OF_ROSCS 4
/* The number of words generated in the entropy holding register (EHR):
* 6 words (192 bits) according to the HW implementation
*/
#define CC_TRNG_EHR_IN_WORDS 6
#define CC_TRNG_EHR_IN_BITS (CC_TRNG_EHR_IN_WORDS * BITS_PER_TYPE(u32))
#define CC_HOST_RNG_IRQ_MASK BIT(CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT)
/* RNG interrupt mask */
#define CC_RNG_INT_MASK (BIT(CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT) | \
BIT(CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT) | \
BIT(CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT) | \
BIT(CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT) | \
BIT(CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT))
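/* With the bit positions below (bits 0..4), CC_RNG_INT_MASK evaluates
 * to 0x1F; cc_trng_enable_rnd_source() writes its complement to
 * CC_RNG_IMR_REG_OFFSET so that exactly these five interrupt sources
 * are unmasked.
 */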
// --------------------------------------
// BLOCK: RNG
// --------------------------------------
#define CC_RNG_IMR_REG_OFFSET 0x0100UL
#define CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT 0x0UL
#define CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT 0x1UL
#define CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT 0x2UL
#define CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT 0x3UL
#define CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT 0x4UL
#define CC_RNG_ISR_REG_OFFSET 0x0104UL
#define CC_RNG_ISR_EHR_VALID_BIT_SHIFT 0x0UL
#define CC_RNG_ISR_EHR_VALID_BIT_SIZE 0x1UL
#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SHIFT 0x1UL
#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SIZE 0x1UL
#define CC_RNG_ISR_CRNGT_ERR_BIT_SHIFT 0x2UL
#define CC_RNG_ISR_CRNGT_ERR_BIT_SIZE 0x1UL
#define CC_RNG_ISR_WATCHDOG_BIT_SHIFT 0x4UL
#define CC_RNG_ISR_WATCHDOG_BIT_SIZE 0x1UL
#define CC_RNG_ICR_REG_OFFSET 0x0108UL
#define CC_TRNG_CONFIG_REG_OFFSET 0x010CUL
#define CC_EHR_DATA_0_REG_OFFSET 0x0114UL
#define CC_RND_SOURCE_ENABLE_REG_OFFSET 0x012CUL
#define CC_SAMPLE_CNT1_REG_OFFSET 0x0130UL
#define CC_TRNG_DEBUG_CONTROL_REG_OFFSET 0x0138UL
#define CC_RNG_SW_RESET_REG_OFFSET 0x0140UL
#define CC_RNG_CLK_ENABLE_REG_OFFSET 0x01C4UL
#define CC_RNG_DMA_ENABLE_REG_OFFSET 0x01C8UL
#define CC_RNG_WATCHDOG_VAL_REG_OFFSET 0x01D8UL
// --------------------------------------
// BLOCK: SEC_HOST_RGF
// --------------------------------------
#define CC_HOST_RGF_IRR_REG_OFFSET 0x0A00UL
#define CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT 0xAUL
#define CC_HOST_RGF_IMR_REG_OFFSET 0x0A04UL
#define CC_HOST_RGF_ICR_REG_OFFSET 0x0A08UL
#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0x0A78UL
// --------------------------------------
// BLOCK: NVM
// --------------------------------------
#define CC_NVM_IS_IDLE_REG_OFFSET 0x0F10UL
#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL

View File

@ -392,11 +392,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "%s: error getting IRQ resource - %d\n",
__func__, irq);
if (irq < 0)
return irq;
}
err = devm_request_irq(dev, irq, omap4_rng_irq,
IRQF_TRIGGER_NONE, dev_name(dev), priv);

View File

@ -226,7 +226,7 @@ static int optee_rng_probe(struct device *dev)
return -ENODEV;
/* Open session with hwrng Trusted App */
memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
export_uuid(sess_arg.uuid, &rng_device->id.uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;

View File

@ -328,10 +328,8 @@ static int xgene_rng_probe(struct platform_device *pdev)
return PTR_ERR(ctx->csr_base);
rc = platform_get_irq(pdev, 0);
if (rc < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
if (rc < 0)
return rc;
}
ctx->irq = rc;
dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",

View File

@ -327,7 +327,6 @@
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
@ -337,6 +336,7 @@
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
#include <crypto/sha.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
@ -1397,14 +1397,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
__u32 w[5];
unsigned long l[LONGS(20)];
} hash;
__u32 workspace[SHA_WORKSPACE_WORDS];
__u32 workspace[SHA1_WORKSPACE_WORDS];
unsigned long flags;
/*
* If we have an architectural hardware random number
* generator, use it for SHA's initial vector
*/
sha_init(hash.w);
sha1_init(hash.w);
for (i = 0; i < LONGS(20); i++) {
unsigned long v;
if (!arch_get_random_long(&v))
@ -1415,7 +1415,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
/* Generate a hash across the pool, 16 words (512 bits) at a time */
spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
* We mix the hash back into the pool to prevent backtracking

View File

@ -537,10 +537,8 @@ static int sun8i_ss_probe(struct platform_device *pdev)
return err;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(ss->dev, "Cannot get SecuritySystem IRQ\n");
if (irq < 0)
return irq;
}
ss->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(ss->reset)) {

View File

@ -253,10 +253,8 @@ static int meson_crypto_probe(struct platform_device *pdev)
mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL);
for (i = 0; i < MAXFLOW; i++) {
mc->irqs[i] = platform_get_irq(pdev, i);
if (mc->irqs[i] < 0) {
dev_err(mc->dev, "Cannot get IRQ for flow %d\n", i);
if (mc->irqs[i] < 0)
return mc->irqs[i];
}
err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0,
"gxl-crypto", mc);

View File

@ -31,7 +31,6 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>

View File

@ -2239,16 +2239,12 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
if (keylen > blocksize) {
SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
hdesc->tfm = tfm_ctx->child_hash;
tfm_ctx->hmac_key_length = blocksize;
ret = crypto_shash_digest(hdesc, key, keylen,
tfm_ctx->hmac_key);
ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
tfm_ctx->hmac_key);
if (ret)
return ret;
} else {
memcpy(tfm_ctx->hmac_key, key, keylen);
tfm_ctx->hmac_key_length = keylen;

View File

@ -308,9 +308,9 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
container_of(areq, struct skcipher_request, base);
struct iproc_ctx_s *ctx = rctx->ctx;
struct spu_cipher_parms cipher_parms;
int err = 0;
unsigned int chunksize = 0; /* Num bytes of request to submit */
int remaining = 0; /* Bytes of request still to process */
int err;
unsigned int chunksize; /* Num bytes of request to submit */
int remaining; /* Bytes of request still to process */
int chunk_start; /* Beginning of data for current SPU msg */
/* IV or ctr value to use in this SPU msg */
@ -698,7 +698,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
/* number of bytes still to be hashed in this req */
unsigned int nbytes_to_hash = 0;
int err = 0;
int err;
unsigned int chunksize = 0; /* length of hash carry + new data */
/*
* length of new data, not from hash carry, to be submitted in
@ -1664,7 +1664,7 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
struct spu_hw *spu = &iproc_priv.spu;
struct brcm_message *mssg = msg;
struct iproc_reqctx_s *rctx;
int err = 0;
int err;
rctx = mssg->ctx;
if (unlikely(!rctx)) {
@ -1967,7 +1967,7 @@ static int ahash_enqueue(struct ahash_request *req)
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
int err = 0;
int err;
const char *alg_name;
flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
@ -2299,7 +2299,7 @@ ahash_finup_exit:
static int ahash_digest(struct ahash_request *req)
{
int err = 0;
int err;
flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
@ -4436,7 +4436,7 @@ static int spu_mb_init(struct device *dev)
for (i = 0; i < iproc_priv.spu.num_chan; i++) {
iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
if (IS_ERR(iproc_priv.mbox[i])) {
err = (int)PTR_ERR(iproc_priv.mbox[i]);
err = PTR_ERR(iproc_priv.mbox[i]);
dev_err(dev,
"Mbox channel %d request failed with err %d",
i, err);
@ -4717,21 +4717,20 @@ static int spu_dt_read(struct platform_device *pdev)
matched_spu_type = of_device_get_match_data(dev);
if (!matched_spu_type) {
dev_err(&pdev->dev, "Failed to match device\n");
dev_err(dev, "Failed to match device\n");
return -ENODEV;
}
spu->spu_type = matched_spu_type->type;
spu->spu_subtype = matched_spu_type->subtype;
i = 0;
for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
if (IS_ERR(spu->reg_vbase[i])) {
err = PTR_ERR(spu->reg_vbase[i]);
dev_err(&pdev->dev, "Failed to map registers: %d\n",
dev_err(dev, "Failed to map registers: %d\n",
err);
spu->reg_vbase[i] = NULL;
return err;
@ -4747,7 +4746,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spu_hw *spu = &iproc_priv.spu;
int err = 0;
int err;
iproc_priv.pdev = pdev;
platform_set_drvdata(iproc_priv.pdev,
@ -4757,7 +4756,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
if (err < 0)
goto failure;
err = spu_mb_init(&pdev->dev);
err = spu_mb_init(dev);
if (err < 0)
goto failure;
@ -4766,7 +4765,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
else if (spu->spu_type == SPU_TYPE_SPU2)
iproc_priv.bcm_hdr_len = 0;
spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
spu_functions_register(dev, spu->spu_type, spu->spu_subtype);
spu_counters_init();

View File

@ -346,7 +346,7 @@ static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
}
/**
* nitrox_bist_check - Check NITORX BIST registers status
* nitrox_bist_check - Check NITROX BIST registers status
* @ndev: NITROX device
*/
static int nitrox_bist_check(struct nitrox_device *ndev)

View File

@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD
config CRYPTO_DEV_SP_CCP
bool "Cryptographic Coprocessor device"
default y
depends on CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP_DD && DMADEVICES
select HW_RANDOM
select DMA_ENGINE
select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help

View File

@ -272,9 +272,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
SHASH_DESC_ON_STACK(sdesc, shash);
unsigned int block_size = crypto_shash_blocksize(shash);
unsigned int digest_size = crypto_shash_digestsize(shash);
int i, ret;
@ -289,10 +286,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
sdesc->tfm = shash;
ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
ret = crypto_shash_tfm_digest(shash, key, key_len,
ctx->u.sha.key);
if (ret)
return -EINVAL;

View File

@ -20,6 +20,7 @@
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <asm/smp.h>
@ -44,6 +45,14 @@ MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during
static bool psp_dead;
static int psp_timeout;
/* Trusted Memory Region (TMR):
* The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
* to allocate the memory, which will return aligned memory for the specified
* allocation order.
*/
#define SEV_ES_TMR_SIZE (1024 * 1024)
static void *sev_es_tmr;
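/* Alignment sketch: with 4 KiB pages, get_order(SEV_ES_TMR_SIZE) is 8
 * (1 MiB == 256 pages == 2^8), and an order-8 buddy allocation is
 * naturally aligned on a 1 MiB boundary, satisfying the TMR alignment
 * requirement without extra work.
 */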
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@ -214,6 +223,20 @@ static int __sev_platform_init_locked(int *error)
if (sev->state == SEV_STATE_INIT)
return 0;
if (sev_es_tmr) {
u64 tmr_pa;
/*
* Do not include the encryption mask on the physical
* address of the TMR (firmware should clear it anyway).
*/
tmr_pa = __pa(sev_es_tmr);
sev->init_cmd_buf.flags |= SEV_INIT_FLAGS_SEV_ES;
sev->init_cmd_buf.tmr_address = tmr_pa;
sev->init_cmd_buf.tmr_len = SEV_ES_TMR_SIZE;
}
rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
if (rc)
return rc;
@ -1012,6 +1035,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
struct page *tmr_page;
int error, rc;
if (!sev)
@ -1041,6 +1065,16 @@ void sev_pci_init(void)
sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
/* Obtain the TMR memory area for SEV-ES use */
tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE));
if (tmr_page) {
sev_es_tmr = page_address(tmr_page);
} else {
sev_es_tmr = NULL;
dev_warn(sev->dev,
"SEV: TMR allocation failed, SEV-ES support unavailable\n");
}
/* Initialize the platform */
rc = sev_platform_init(&error);
if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
@ -1075,4 +1109,13 @@ void sev_pci_exit(void)
return;
sev_platform_shutdown(NULL);
if (sev_es_tmr) {
/* The TMR area was encrypted, flush it from the cache */
wbinvd_on_all_cpus();
free_pages((unsigned long)sev_es_tmr,
get_order(SEV_ES_TMR_SIZE));
sev_es_tmr = NULL;
}
}

View File

@ -427,12 +427,9 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
int key_len = keylen >> 1;
int err;
SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
desc->tfm = ctx_p->shash_tfm;
err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
ctx_p->user.key + key_len);
err = crypto_shash_tfm_digest(ctx_p->shash_tfm,
ctx_p->user.key, key_len,
ctx_p->user.key + key_len);
if (err) {
dev_err(dev, "Failed to hash ESSIV key.\n");
return err;

View File

@ -26,7 +26,7 @@ static struct debugfs_reg32 ver_sig_regs[] = {
{ .name = "VERSION" }, /* Must be 1st */
};
static struct debugfs_reg32 pid_cid_regs[] = {
static const struct debugfs_reg32 pid_cid_regs[] = {
CC_DEBUG_REG(PERIPHERAL_ID_0),
CC_DEBUG_REG(PERIPHERAL_ID_1),
CC_DEBUG_REG(PERIPHERAL_ID_2),
@ -38,7 +38,7 @@ static struct debugfs_reg32 pid_cid_regs[] = {
CC_DEBUG_REG(COMPONENT_ID_3),
};
static struct debugfs_reg32 debug_regs[] = {
static const struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(HOST_IRR),
CC_DEBUG_REG(HOST_POWER_DOWN_EN),
CC_DEBUG_REG(AXIM_MON_ERR),

View File

@ -44,7 +44,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
@ -1757,7 +1756,7 @@ static int chcr_ahash_final(struct ahash_request *req)
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
struct chcr_context *ctx = h_ctx(rtfm);
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
int error = -EINVAL;
int error;
unsigned int cpu;
cpu = get_cpu();

View File

@ -40,7 +40,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>

View File

@ -29,6 +29,7 @@ config CRYPTO_DEV_HISI_SEC2
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on ACPI
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
It provides AES, SM4, and 3DES algorithms with ECB
@ -42,6 +43,7 @@ config CRYPTO_DEV_HISI_QM
depends on ARM64 || COMPILE_TEST
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ACPI
help
HiSilicon accelerator engines use a common queue management
interface. Specific engine driver may use this module.
@ -52,6 +54,7 @@ config CRYPTO_DEV_HISI_ZIP
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
depends on UACCE || UACCE=n
depends on ACPI
select CRYPTO_DEV_HISI_QM
help
Support for HiSilicon ZIP Driver
@ -61,6 +64,7 @@ config CRYPTO_DEV_HISI_HPRE
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on ACPI
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA

View File

@ -25,6 +25,17 @@ enum hpre_ctrl_dbgfs_file {
HPRE_DEBUG_FILE_NUM,
};
enum hpre_dfx_dbgfs_file {
HPRE_SEND_CNT,
HPRE_RECV_CNT,
HPRE_SEND_FAIL_CNT,
HPRE_SEND_BUSY_CNT,
HPRE_OVER_THRHLD_CNT,
HPRE_OVERTIME_THRHLD,
HPRE_INVALID_REQ_CNT,
HPRE_DFX_FILE_NUM
};
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
struct hpre_debugfs_file {
@ -34,6 +45,11 @@ struct hpre_debugfs_file {
struct hpre_debug *debug;
};
struct hpre_dfx {
atomic64_t value;
enum hpre_dfx_dbgfs_file type;
};
/*
* One HPRE controller has one PF and multiple VFs; some global
* configurations, which the PF owns, need this structure.
@ -41,13 +57,13 @@ struct hpre_debugfs_file {
*/
struct hpre_debug {
struct dentry *debug_root;
struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
struct hpre {
struct hisi_qm qm;
struct hpre_debug debug;
u32 num_vfs;
unsigned long status;
};

View File

@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"
struct hpre_ctx;
@ -32,6 +33,9 @@ struct hpre_ctx;
#define HPRE_SQE_DONE_SHIFT 30
#define HPRE_DH_MAX_P_SZ 512
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@ -68,6 +72,7 @@ struct hpre_dh_ctx {
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
struct hpre *hpre;
spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
@ -90,6 +95,7 @@ struct hpre_asym_request {
int err;
int req_id;
hpre_cb cb;
struct timespec64 req_time;
};
static DEFINE_MUTEX(hpre_alg_lock);
@ -119,6 +125,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
struct hpre_ctx *ctx;
struct hpre_dfx *dfx;
int id;
ctx = hpre_req->ctx;
@ -129,6 +136,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx->req_list[id] = hpre_req;
hpre_req->req_id = id;
dfx = ctx->hpre->debug.dfx;
if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
ktime_get_ts64(&hpre_req->req_time);
return id;
}
@ -309,12 +320,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
struct hpre *hpre;
if (!ctx || !qp || qlen < 0)
return -EINVAL;
spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
hpre = container_of(ctx->qp->qm, struct hpre, qm);
ctx->hpre = hpre;
ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
if (!ctx->req_list)
return -ENOMEM;
@ -337,38 +352,80 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
ctx->key_sz = 0;
}
static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
u64 overtime_thrhld)
{
struct timespec64 reply_time;
u64 time_use_us;
ktime_get_ts64(&reply_time);
time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
HPRE_DFX_SEC_TO_US +
(reply_time.tv_nsec - req->req_time.tv_nsec) /
HPRE_DFX_US_TO_NS;
if (time_use_us <= overtime_thrhld)
return false;
return true;
}
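/* E.g. (illustrative timestamps): a request stamped at 1.000000000 s
 * that completes at 1.000350000 s yields
 * 0 * 1000000 + 350000 / 1000 == 350 us, which is then compared
 * against overtime_thrhld.
 */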
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct kpp_request *areq;
u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
areq = req->areq.dh;
areq->dst_len = ctx->key_sz;
overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct akcipher_request *areq;
u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
areq = req->areq.rsa;
areq->dst_len = ctx->key_sz;
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
akcipher_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
struct hpre_ctx *ctx = qp->qp_ctx;
struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_sqe *sqe = resp;
struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
if (unlikely(!req)) {
atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
return;
}
req->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
@ -436,6 +493,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
return 0;
}
static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
int ctr = 0;
int ret;
do {
atomic64_inc(&dfx[HPRE_SEND_CNT].value);
ret = hisi_qp_send(ctx->qp, msg);
if (ret != -EBUSY)
break;
atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
} while (ctr++ < HPRE_TRY_SEND_TIMES);
if (likely(!ret))
return ret;
if (ret != -EBUSY)
atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
return ret;
}
#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
@ -444,7 +524,6 @@ static int hpre_dh_compute_value(struct kpp_request *req)
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
int ctr = 0;
int ret;
ret = hpre_msg_request_set(ctx, req, false);
@ -465,11 +544,9 @@ static int hpre_dh_compute_value(struct kpp_request *req)
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@ -647,7 +724,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
int ctr = 0;
int ret;
/* For 512 and 1536 bit key sizes, use a soft tfm instead */
@ -677,11 +753,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@ -699,7 +772,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
int ctr = 0;
int ret;
/* For 512 and 1536 bit key sizes, use a soft tfm instead */
@ -736,11 +808,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;

View File

@ -59,10 +59,6 @@
#define HPRE_HAC_ECC2_CNT 0x301a08
#define HPRE_HAC_INT_STATUS 0x301800
#define HPRE_HAC_SOURCE_INT 0x301600
#define MASTER_GLOBAL_CTRL_SHUTDOWN 1
#define MASTER_TRANS_RETURN_RW 3
#define HPRE_MASTER_TRANS_RETURN 0x300150
#define HPRE_MASTER_GLOBAL_CTRL 0x300000
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
@ -80,7 +76,16 @@
#define HPRE_BD_USR_MASK 0x3
#define HPRE_CLUSTER_CORE_MASK 0xf
#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
#define HPRE_WR_MSI_PORT BIT(2)
#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR BIT(5)
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
@ -131,7 +136,7 @@ static const u64 hpre_cluster_offsets[] = {
HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};
static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
{"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
{"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
{"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
@ -139,7 +144,7 @@ static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
{"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
};
static struct debugfs_reg32 hpre_com_dfx_regs[] = {
static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
{"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
{"AXQOS ", HPRE_VFG_AXQOS},
{"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
@ -156,44 +161,38 @@ static struct debugfs_reg32 hpre_com_dfx_regs[] = {
{"INT_STATUS ", HPRE_INT_STATUS},
};
static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
"send_cnt",
"recv_cnt",
"send_fail_cnt",
"send_busy_cnt",
"over_thrhld_cnt",
"overtime_thrhld",
"invalid_req_cnt"
};
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
struct pci_dev *pdev;
u32 n, q_num;
u8 rev_id;
int ret;
if (!val)
return -EINVAL;
pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
if (!pdev) {
q_num = HPRE_QUEUE_NUM_V2;
pr_info("No device found currently, suppose queue number is %d\n",
q_num);
} else {
rev_id = pdev->revision;
if (rev_id != QM_HW_V2)
return -EINVAL;
q_num = HPRE_QUEUE_NUM_V2;
}
ret = kstrtou32(val, 10, &n);
if (ret != 0 || n == 0 || n > q_num)
return -EINVAL;
return param_set_int(val, kp);
return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
.set = hpre_pf_q_num_set,
.set = pf_q_num_set,
.get = param_get_int,
};
static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)");
static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");
static const struct kernel_param_ops vfs_num_ops = {
.set = vfs_num_set,
.get = param_get_int,
};
static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
struct hisi_qp *hpre_create_qp(void)
{
@ -232,9 +231,8 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
return 0;
}
static int hpre_set_user_domain_and_cache(struct hpre *hpre)
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
unsigned long offset;
int ret, i;
@ -324,17 +322,34 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
u32 val;
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when an m-bit error occurs */
val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
u32 val;
/* clear HPRE hw error source if any */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
/* enable hpre hw error interrupts */
writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when an m-bit error occurs */
val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@ -354,9 +369,7 @@ static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
struct hpre_debug *debug = file->debug;
struct hpre *hpre = container_of(debug, struct hpre, debug);
u32 num_vfs = hpre->num_vfs;
u32 num_vfs = qm->vfs_num;
u32 vfq_num, tmp;
@ -523,6 +536,33 @@ static const struct file_operations hpre_ctrl_debug_fops = {
.write = hpre_ctrl_debug_write,
};
static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
struct hpre_dfx *dfx_item = data;
*val = atomic64_read(&dfx_item->value);
return 0;
}
static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
struct hpre_dfx *dfx_item = data;
struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
if (val)
return -EINVAL;
if (dfx_item->type == HPRE_OVERTIME_THRHLD)
atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
atomic64_set(&dfx_item->value, val);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
hpre_debugfs_atomic64_set, "%llu\n");
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
@ -620,6 +660,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug);
}
static void hpre_dfx_debug_init(struct hpre_debug *debug)
{
struct hpre *hpre = container_of(debug, struct hpre, debug);
struct hpre_dfx *dfx = hpre->debug.dfx;
struct hisi_qm *qm = &hpre->qm;
struct dentry *parent;
int i;
parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
dfx[i].type = i;
debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
&hpre_atomic64_ops);
}
}
static int hpre_debugfs_init(struct hpre *hpre)
{
struct hisi_qm *qm = &hpre->qm;
@ -629,6 +685,8 @@ static int hpre_debugfs_init(struct hpre *hpre)
dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
qm->debug.debug_root = dir;
qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
ret = hisi_qm_debug_init(qm);
if (ret)
@ -640,6 +698,9 @@ static int hpre_debugfs_init(struct hpre *hpre)
if (ret)
goto failed_to_create;
}
hpre_dfx_debug_init(&hpre->debug);
return 0;
failed_to_create:
@ -654,32 +715,27 @@ static void hpre_debugfs_exit(struct hpre *hpre)
debugfs_remove_recursive(qm->debug.debug_root);
}
static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
enum qm_hw_ver rev_id;
rev_id = hisi_qm_get_hw_version(pdev);
if (rev_id < 0)
return -ENODEV;
if (rev_id == QM_HW_V1) {
if (pdev->revision == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
qm->pdev = pdev;
qm->ver = rev_id;
qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
QM_HW_PF : QM_HW_VF;
if (pdev->is_physfn) {
qm->qp_base = HPRE_PF_DEF_Q_BASE;
qm->qp_num = hpre_pf_q_num;
}
qm->use_dma_api = true;
return 0;
qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
QM_HW_PF : QM_HW_VF;
if (qm->fun_type == QM_HW_PF) {
qm->qp_base = HPRE_PF_DEF_Q_BASE;
qm->qp_num = pf_q_num;
qm->qm_list = &hpre_devices;
}
return hisi_qm_init(qm);
}
static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
@ -693,8 +749,6 @@ static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
err->msg, err->int_msk);
err++;
}
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
@ -702,16 +756,38 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + HPRE_HAC_INT_STATUS);
}
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 value;
value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
}
static const struct hisi_qm_err_ini hpre_err_ini = {
.hw_init = hpre_set_user_domain_and_cache,
.hw_err_enable = hpre_hw_error_enable,
.hw_err_disable = hpre_hw_error_disable,
.get_dev_hw_err_status = hpre_get_hw_err_status,
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
.log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
.fe = 0,
.msi = QM_DB_RANDOM_INVALID,
.ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
HPRE_OOO_ECC_2BIT_ERR,
.msi_wr_port = HPRE_WR_MSI_PORT,
.acpi_rst = "HRST",
}
};
@ -722,7 +798,7 @@ static int hpre_pf_probe_init(struct hpre *hpre)
qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
ret = hpre_set_user_domain_and_cache(hpre);
ret = hpre_set_user_domain_and_cache(qm);
if (ret)
return ret;
@ -732,6 +808,20 @@ static int hpre_pf_probe_init(struct hpre *hpre)
return 0;
}
static int hpre_probe_init(struct hpre *hpre)
{
struct hisi_qm *qm = &hpre->qm;
int ret;
if (qm->fun_type == QM_HW_PF) {
ret = hpre_pf_probe_init(hpre);
if (ret)
return ret;
}
return 0;
}
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_qm *qm;
@ -742,26 +832,17 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hpre)
return -ENOMEM;
	qm = &hpre->qm;
	ret = hpre_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
		return ret;
	}
	ret = hpre_probe_init(hpre);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_with_qm_init;
	}
ret = hisi_qm_start(qm);
@ -779,8 +860,18 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
}
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
goto err_with_crypto_register;
}
return 0;
err_with_crypto_register:
hpre_algs_unregister();
err_with_qm_start:
hisi_qm_del_from_list(qm, &hpre_devices);
hisi_qm_stop(qm);
@ -794,107 +885,6 @@ err_with_qm_init:
return ret;
}
static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
{
struct hisi_qm *qm = &hpre->qm;
u32 qp_num = qm->qp_num;
int q_num, remain_q_num, i;
u32 q_base = qp_num;
int ret;
if (!num_vfs)
return -EINVAL;
remain_q_num = qm->ctrl_qp_num - qp_num;
/* If remaining queues are not enough, return error. */
if (remain_q_num < num_vfs)
return -EINVAL;
q_num = remain_q_num / num_vfs;
for (i = 1; i <= num_vfs; i++) {
if (i == num_vfs)
q_num += remain_q_num % num_vfs;
ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
if (ret)
return ret;
q_base += q_num;
}
return 0;
}
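As a worked example of the split above (numbers are hypothetical, not from the patch): with qm->ctrl_qp_num = 1024 and qm->qp_num = 256, remain_q_num is 768; for num_vfs = 5, VF1..VF4 each get 768 / 5 = 153 queues and VF5 gets 153 + (768 % 5) = 156, with q_base starting at 256 and advancing past each assignment.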
static int hpre_clear_vft_config(struct hpre *hpre)
{
struct hisi_qm *qm = &hpre->qm;
u32 num_vfs = hpre->num_vfs;
int ret;
u32 i;
for (i = 1; i <= num_vfs; i++) {
ret = hisi_qm_set_vft(qm, i, 0, 0);
if (ret)
return ret;
}
hpre->num_vfs = 0;
return 0;
}
static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
struct hpre *hpre = pci_get_drvdata(pdev);
int pre_existing_vfs, num_vfs, ret;
pre_existing_vfs = pci_num_vf(pdev);
if (pre_existing_vfs) {
pci_err(pdev,
"Can't enable VF. Please disable pre-enabled VFs!\n");
return 0;
}
num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
ret = hpre_vf_q_assign(hpre, num_vfs);
if (ret) {
pci_err(pdev, "Can't assign queues for VF!\n");
return ret;
}
hpre->num_vfs = num_vfs;
ret = pci_enable_sriov(pdev, num_vfs);
if (ret) {
pci_err(pdev, "Can't enable VF!\n");
hpre_clear_vft_config(hpre);
return ret;
}
return num_vfs;
}
static int hpre_sriov_disable(struct pci_dev *pdev)
{
struct hpre *hpre = pci_get_drvdata(pdev);
if (pci_vfs_assigned(pdev)) {
pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n");
return -EPERM;
}
/* remove in hpre_pci_driver will be called to free VF resources */
pci_disable_sriov(pdev);
return hpre_clear_vft_config(hpre);
}
static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs)
return hpre_sriov_enable(pdev, num_vfs);
else
return hpre_sriov_disable(pdev);
}
static void hpre_remove(struct pci_dev *pdev)
{
struct hpre *hpre = pci_get_drvdata(pdev);
@ -903,8 +893,8 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_algs_unregister();
hisi_qm_del_from_list(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
		ret = hisi_qm_sriov_disable(pdev);
if (ret) {
pci_err(pdev, "Disable SRIOV fail!\n");
return;
@ -924,6 +914,9 @@ static void hpre_remove(struct pci_dev *pdev)
static const struct pci_error_handlers hpre_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
.slot_reset = hisi_qm_dev_slot_reset,
.reset_prepare = hisi_qm_reset_prepare,
.reset_done = hisi_qm_reset_done,
};
static struct pci_driver hpre_pci_driver = {
@ -931,7 +924,7 @@ static struct pci_driver hpre_pci_driver = {
.id_table = hpre_dev_ids,
.probe = hpre_probe,
.remove = hpre_remove,
.sriov_configure = hpre_sriov_configure,
.sriov_configure = hisi_qm_sriov_configure,
.err_handler = &hpre_err_handler,
};

File diff suppressed because it is too large


@ -8,6 +8,10 @@
#include <linux/module.h>
#include <linux/pci.h>
#define QM_QNUM_V1 4096
#define QM_QNUM_V2 1024
#define QM_MAX_VFS_NUM_V2 63
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define AXUSER_SNOOP_ENABLE BIT(30)
@ -70,7 +74,7 @@
#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
			 QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID)
#define QM_BASE_CE QM_ECC_1BIT
#define QM_Q_DEPTH 1024
@ -80,14 +84,31 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
enum qm_stop_reason {
QM_NORMAL,
QM_SOFT_RESET,
QM_FLR,
};
enum qm_state {
QM_INIT = 0,
QM_START,
QM_CLOSE,
QM_STOP,
};
enum qp_state {
QP_INIT = 1,
QP_START,
QP_STOP,
QP_CLOSE,
};
enum qm_hw_ver {
QM_HW_UNKNOWN = -1,
QM_HW_V1 = 0x20,
QM_HW_V2 = 0x21,
QM_HW_V3 = 0x30,
};
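The enum values are no longer arbitrary: QM_HW_V1/V2/V3 now equal the PCI revision IDs of the respective silicon (0x20, 0x21, 0x30), which is what lets the drivers in this series assign qm->ver = pdev->revision directly instead of going through hisi_qm_get_hw_version().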
enum qm_fun_type {
@ -101,6 +122,14 @@ enum qm_debug_file {
DEBUG_FILE_NUM,
};
struct qm_dfx {
atomic64_t err_irq_cnt;
atomic64_t aeq_irq_cnt;
atomic64_t abnormal_irq_cnt;
atomic64_t create_qp_err_cnt;
atomic64_t mb_err_cnt;
};
struct debugfs_file {
enum qm_debug_file index;
struct mutex lock;
@ -109,6 +138,9 @@ struct debugfs_file {
struct qm_debug {
u32 curr_qm_qp_num;
u32 sqe_mask_offset;
u32 sqe_mask_len;
struct qm_dfx dfx;
struct dentry *debug_root;
struct dentry *qm_d;
struct debugfs_file files[DEBUG_FILE_NUM];
@ -125,22 +157,34 @@ struct hisi_qm_status {
bool eqc_phase;
u32 aeq_head;
bool aeqc_phase;
	atomic_t flags;
int stop_reason;
};
struct hisi_qm;
struct hisi_qm_err_info {
char *acpi_rst;
u32 msi_wr_port;
u32 ecc_2bits_mask;
u32 ce;
u32 nfe;
u32 fe;
u32 msi;
};
struct hisi_qm_err_status {
u32 is_qm_ecc_mbit;
u32 is_dev_ecc_mbit;
};
struct hisi_qm_err_ini {
int (*hw_init)(struct hisi_qm *qm);
void (*hw_err_enable)(struct hisi_qm *qm);
void (*hw_err_disable)(struct hisi_qm *qm);
u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
void (*open_axi_master_ooo)(struct hisi_qm *qm);
void (*close_axi_master_ooo)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
struct hisi_qm_err_info err_info;
};
@ -161,7 +205,9 @@ struct hisi_qm {
u32 qp_num;
u32 qp_in_used;
u32 ctrl_qp_num;
u32 vfs_num;
struct list_head list;
struct hisi_qm_list *qm_list;
struct qm_dma qdma;
struct qm_sqc *sqc;
@ -175,10 +221,12 @@ struct hisi_qm {
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
struct hisi_qm_err_status err_status;
unsigned long reset_flag;
struct rw_semaphore qps_lock;
struct idr qp_idr;
struct hisi_qp *qp_array;
struct mutex mailbox_lock;
@ -187,13 +235,12 @@ struct hisi_qm {
struct qm_debug debug;
u32 error_mask;
u32 msi_mask;
struct workqueue_struct *wq;
struct work_struct work;
struct work_struct rst_work;
const char *algs;
bool use_sva;
resource_size_t phys_base;
resource_size_t phys_size;
@ -205,7 +252,7 @@ struct hisi_qp_status {
u16 sq_tail;
u16 cq_head;
bool cqc_phase;
	atomic_t flags;
};
struct hisi_qp_ops {
@ -230,10 +277,58 @@ struct hisi_qp {
void (*event_cb)(struct hisi_qp *qp);
struct hisi_qm *qm;
bool is_resetting;
u16 pasid;
struct uacce_queue *uacce_q;
};
static inline int q_num_set(const char *val, const struct kernel_param *kp,
unsigned int device)
{
struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
device, NULL);
u32 n, q_num;
int ret;
if (!val)
return -EINVAL;
if (!pdev) {
q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
pr_info("No device found currently, suppose queue number is %d\n",
q_num);
} else {
if (pdev->revision == QM_HW_V1)
q_num = QM_QNUM_V1;
else
q_num = QM_QNUM_V2;
}
ret = kstrtou32(val, 10, &n);
if (ret || !n || n > q_num)
return -EINVAL;
return param_set_int(val, kp);
}
static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
u32 n;
int ret;
if (!val)
return -EINVAL;
ret = kstrtou32(val, 10, &n);
if (ret < 0)
return ret;
if (n > QM_MAX_VFS_NUM_V2)
return -EINVAL;
return param_set_int(val, kp);
}
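Both helpers are meant to be wrapped by driver-specific kernel_param_ops, as the sec and zip callbacks further down do; a minimal sketch of the expected wiring (foo and FOO_PCI_DEVICE_ID are placeholders, not real identifiers):

static int foo_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	/* clamp the requested queue count against the probed device, if any */
	return q_num_set(val, kp, FOO_PCI_DEVICE_ID);
}

static const struct kernel_param_ops foo_pf_q_num_ops = {
	.set = foo_pf_q_num_set,
	.get = param_get_int,
};

static const struct kernel_param_ops foo_vfs_num_ops = {
	.set = vfs_num_set,	/* already device-independent */
	.get = param_get_int,
};

static u32 foo_pf_q_num;
module_param_cb(pf_q_num, &foo_pf_q_num_ops, &foo_pf_q_num, 0444);

static u32 foo_vfs_num;
module_param_cb(vfs_num, &foo_vfs_num_ops, &foo_vfs_num, 0444);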
static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
INIT_LIST_HEAD(&qm_list->list);
@ -267,14 +362,19 @@ void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
int hisi_qm_debug_init(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
void hisi_qm_reset_prepare(struct pci_dev *pdev);
void hisi_qm_reset_done(struct pci_dev *pdev);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,


@ -160,6 +160,10 @@ struct sec_debug_file {
struct sec_dfx {
atomic64_t send_cnt;
atomic64_t recv_cnt;
atomic64_t send_busy_cnt;
atomic64_t err_bd_cnt;
atomic64_t invalid_req_cnt;
atomic64_t done_flag_cnt;
};
struct sec_debug {
@ -172,7 +176,6 @@ struct sec_dev {
struct sec_debug debug;
u32 ctx_q_num;
bool iommu_used;
unsigned long status;
};


@ -148,6 +148,7 @@ static int sec_aead_verify(struct sec_req *req)
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
struct sec_sqe *bd = resp;
struct sec_ctx *ctx;
struct sec_req *req;
@ -157,11 +158,16 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
type = bd->type_cipher_auth & SEC_TYPE_MASK;
if (unlikely(type != SEC_BD_TYPE2)) {
atomic64_inc(&dfx->err_bd_cnt);
pr_err("err bd type [%d]\n", type);
return;
}
req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
if (unlikely(!req)) {
atomic64_inc(&dfx->invalid_req_cnt);
return;
}
req->err_type = bd->type2.error_type;
ctx = req->ctx;
done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
@ -174,12 +180,13 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
"err_type[%d],done[%d],flag[%d]\n",
req->err_type, done, flag);
err = -EIO;
atomic64_inc(&dfx->done_flag_cnt);
}
if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
err = sec_aead_verify(req);
	atomic64_inc(&dfx->recv_cnt);
ctx->req_op->buf_unmap(ctx, req);
@ -200,10 +207,12 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
return -ENOBUFS;
if (!ret) {
		if (req->fake_busy) {
			atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
			ret = -EBUSY;
		} else {
			ret = -EINPROGRESS;
		}
}
return ret;
@ -832,7 +841,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
struct crypto_authenc_keys *keys)
{
struct crypto_shash *hash_tfm = ctx->hash_tfm;
int blocksize, ret;
if (!keys->authkeylen) {
@ -842,8 +850,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
blocksize = crypto_shash_blocksize(hash_tfm);
if (keys->authkeylen > blocksize) {
ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
keys->authkeylen, ctx->a_key);
if (ret) {
pr_err("hisi_sec2: aead auth digest error!\n");
return -EINVAL;

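For context, crypto_shash_tfm_digest() — introduced earlier in this series — wraps exactly the on-stack descriptor pattern deleted above. A sketch of what the new one-line call expands to (an illustration, not a verbatim copy of the helper):

static int tfm_digest_sketch(struct crypto_shash *tfm, const u8 *data,
			     unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	err = crypto_shash_digest(desc, data, len, out);
	shash_desc_zero(desc);	/* don't leak hash state on the stack */
	return err;
}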

@ -80,6 +80,9 @@
#define SEC_VF_CNT_MASK 0xffffffc0
#define SEC_DBGFS_VAL_MAX_LEN 20
#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48
#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
@ -88,6 +91,11 @@ struct sec_hw_error {
const char *msg;
};
struct sec_dfx_item {
const char *name;
u32 offset;
};
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static struct hisi_qm_list sec_devices;
@ -110,7 +118,16 @@ static const char * const sec_dbg_file_name[] = {
[SEC_CLEAR_ENABLE] = "clear_enable",
};
static struct sec_dfx_item sec_dfx_labels[] = {
{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};
static const struct debugfs_reg32 sec_dfx_regs[] = {
{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
{"SEC_SAA_EN ", 0x301270},
{"SEC_BD_LATENCY_MIN ", 0x301600},
@ -136,45 +153,14 @@ static struct debugfs_reg32 sec_dfx_regs[] = {
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}
static const struct kernel_param_ops sec_pf_q_num_ops = {
.set = sec_pf_q_num_set,
.get = param_get_int,
};
static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
@ -207,6 +193,15 @@ static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
static const struct kernel_param_ops vfs_num_ops = {
.set = vfs_num_set,
.get = param_get_int,
};
static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
hisi_qm_free_qps(qps, qp_num);
@ -240,9 +235,8 @@ static const struct pci_device_id sec_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);
static u8 sec_get_endian(struct hisi_qm *qm)
{
u32 reg;
/*
@ -270,9 +264,8 @@ static u8 sec_get_endian(struct sec_dev *sec)
return SEC_64BE;
}
static int sec_engine_init(struct hisi_qm *qm)
{
int ret;
u32 reg;
@ -315,7 +308,7 @@ static int sec_engine_init(struct sec_dev *sec)
/* config endian */
reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= sec_get_endian(qm);
writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
/* Enable sm4 xts mode multiple iv */
@ -325,10 +318,8 @@ static int sec_engine_init(struct sec_dev *sec)
return 0;
}
static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
/* qm user domain */
writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
@ -349,7 +340,7 @@ static int sec_set_user_domain_and_cache(struct sec_dev *sec)
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
	return sec_engine_init(qm);
}
/* sec_debug_regs_clear() - clear the sec debug regs */
@ -424,23 +415,22 @@ static u32 sec_current_qm_read(struct sec_debug_file *file)
static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
struct hisi_qm *qm = file->qm;
	u32 vfq_num;
	u32 tmp;
	if (val > qm->vfs_num)
		return -EINVAL;
	/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
	if (!val) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
		if (val == qm->vfs_num)
			qm->debug.curr_qm_qp_num =
				qm->ctrl_qp_num - qm->qp_num -
				(qm->vfs_num - 1) * vfq_num;
else
qm->debug.curr_qm_qp_num = vfq_num;
}
@ -570,10 +560,22 @@ static const struct file_operations sec_dbg_fops = {
static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
*val = atomic64_read((atomic64_t *)data);
return 0;
}
static int sec_debugfs_atomic64_set(void *data, u64 val)
{
if (val)
return -EINVAL;
atomic64_set((atomic64_t *)data, 0);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
NULL, "%lld\n");
sec_debugfs_atomic64_set, "%lld\n");
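Pairing the new sec_debugfs_atomic64_set() with the existing read callback makes every dfx counter clearable from user space: a read returns the current count, writing 0 resets it, and any non-zero write fails with -EINVAL (hence the 0644 mode used when the files are created below).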
static int sec_core_debug_init(struct sec_dev *sec)
{
@ -582,6 +584,7 @@ static int sec_core_debug_init(struct sec_dev *sec)
struct sec_dfx *dfx = &sec->debug.dfx;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
int i;
tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);
@ -593,13 +596,15 @@ static int sec_core_debug_init(struct sec_dev *sec)
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
regset->base = qm->io_base;
debugfs_create_regset32("regs", 0444, tmp_d, regset);
if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
debugfs_create_regset32("regs", 0444, tmp_d, regset);
debugfs_create_file("send_cnt", 0444, tmp_d,
&dfx->send_cnt, &sec_atomic64_ops);
debugfs_create_file("recv_cnt", 0444, tmp_d,
&dfx->recv_cnt, &sec_atomic64_ops);
for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
sec_dfx_labels[i].offset);
debugfs_create_file(sec_dfx_labels[i].name, 0644,
tmp_d, data, &sec_atomic64_ops);
}
return 0;
}
@ -630,6 +635,9 @@ static int sec_debugfs_init(struct sec_dev *sec)
qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
sec_debugfs_root);
qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
@ -675,8 +683,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
errs++;
}
}
static u32 sec_get_hw_err_status(struct hisi_qm *qm)
@ -684,17 +690,36 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + SEC_CORE_INT_STATUS);
}
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}
static const struct hisi_qm_err_ini sec_err_ini = {
.hw_init = sec_set_user_domain_and_cache,
.hw_err_enable = sec_hw_error_enable,
.hw_err_disable = sec_hw_error_disable,
.get_dev_hw_err_status = sec_get_hw_err_status,
.clear_dev_hw_err_status = sec_clear_hw_err_status,
.log_dev_hw_err = sec_log_hw_error,
.open_axi_master_ooo = sec_open_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
.msi = QM_DB_RANDOM_INVALID,
.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
.msi_wr_port = BIT(0),
.acpi_rst = "SRST",
}
};
@ -703,22 +728,14 @@ static int sec_pf_probe_init(struct sec_dev *sec)
struct hisi_qm *qm = &sec->qm;
int ret;
	if (qm->ver == QM_HW_V1)
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
	else
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
qm->err_ini = &sec_err_ini;
	ret = sec_set_user_domain_and_cache(qm);
if (ret)
return ret;
@ -730,32 +747,30 @@ static int sec_pf_probe_init(struct sec_dev *sec)
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;
	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;
	qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * have no way to get qm configure in VM in v1 hardware,
		 * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
		 * to trigger only one VF in v1 hardware.
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}
	/*
	 * WQ_HIGHPRI: SEC request must be low delayed,
	 * so need a high priority workqueue.
	 * WQ_UNBOUND: SEC task is likely with long
	 * running CPU intensive workloads.
	 */
	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "fail to alloc workqueue\n");
		return -ENOMEM;
	}
	ret = hisi_qm_init(qm);
	if (ret)
		destroy_workqueue(qm->wq);
	return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;
	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
	}
	return 0;
}
static void sec_probe_uninit(struct hisi_qm *qm)
@ -840,20 +846,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!sec)
return -ENOMEM;
	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}
	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);
	ret = sec_probe_init(sec);
if (ret) {
pci_err(pdev, "Failed to probe!\n");
goto err_qm_uninit;
@ -877,8 +880,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_remove_from_list;
}
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
goto err_crypto_unregister;
}
return 0;
err_crypto_unregister:
sec_unregister_from_crypto();
err_remove_from_list:
hisi_qm_del_from_list(qm, &sec_devices);
sec_debugfs_exit(sec);
@ -893,110 +905,6 @@ err_qm_uninit:
return ret;
}
/* now we only support equal assignment */
static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs)
{
struct hisi_qm *qm = &sec->qm;
u32 qp_num = qm->qp_num;
u32 q_base = qp_num;
u32 q_num, remain_q_num;
int i, j, ret;
if (!num_vfs)
return -EINVAL;
remain_q_num = qm->ctrl_qp_num - qp_num;
q_num = remain_q_num / num_vfs;
for (i = 1; i <= num_vfs; i++) {
if (i == num_vfs)
q_num += remain_q_num % num_vfs;
ret = hisi_qm_set_vft(qm, i, q_base, q_num);
if (ret) {
for (j = i; j > 0; j--)
hisi_qm_set_vft(qm, j, 0, 0);
return ret;
}
q_base += q_num;
}
return 0;
}
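Unlike the hpre variant earlier in this section, this version unwinds on failure: if hisi_qm_set_vft() fails for VF i, the inner loop walks j from i back down to 1 and clears the virtual function tables that were already programmed before returning the error.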
static int sec_clear_vft_config(struct sec_dev *sec)
{
struct hisi_qm *qm = &sec->qm;
u32 num_vfs = sec->num_vfs;
int ret;
u32 i;
for (i = 1; i <= num_vfs; i++) {
ret = hisi_qm_set_vft(qm, i, 0, 0);
if (ret)
return ret;
}
sec->num_vfs = 0;
return 0;
}
static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
struct sec_dev *sec = pci_get_drvdata(pdev);
int pre_existing_vfs, ret;
u32 num_vfs;
pre_existing_vfs = pci_num_vf(pdev);
if (pre_existing_vfs) {
pci_err(pdev, "Can't enable VF. Please disable at first!\n");
return 0;
}
num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);
ret = sec_vf_q_assign(sec, num_vfs);
if (ret) {
pci_err(pdev, "Can't assign queues for VF!\n");
return ret;
}
sec->num_vfs = num_vfs;
ret = pci_enable_sriov(pdev, num_vfs);
if (ret) {
pci_err(pdev, "Can't enable VF!\n");
sec_clear_vft_config(sec);
return ret;
}
return num_vfs;
}
static int sec_sriov_disable(struct pci_dev *pdev)
{
struct sec_dev *sec = pci_get_drvdata(pdev);
if (pci_vfs_assigned(pdev)) {
pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
return -EPERM;
}
/* remove in sec_pci_driver will be called to free VF resources */
pci_disable_sriov(pdev);
return sec_clear_vft_config(sec);
}
static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs)
return sec_sriov_enable(pdev, num_vfs);
else
return sec_sriov_disable(pdev);
}
static void sec_remove(struct pci_dev *pdev)
{
struct sec_dev *sec = pci_get_drvdata(pdev);
@ -1006,8 +914,8 @@ static void sec_remove(struct pci_dev *pdev)
hisi_qm_del_from_list(qm, &sec_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev);
sec_debugfs_exit(sec);
@ -1023,6 +931,9 @@ static void sec_remove(struct pci_dev *pdev)
static const struct pci_error_handlers sec_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
.slot_reset = hisi_qm_dev_slot_reset,
.reset_prepare = hisi_qm_reset_prepare,
.reset_done = hisi_qm_reset_done,
};
static struct pci_driver sec_pci_driver = {
@ -1031,7 +942,7 @@ static struct pci_driver sec_pci_driver = {
.probe = sec_probe,
.remove = sec_remove,
.err_handler = &sec_err_handler,
.sriov_configure = sec_sriov_configure,
.sriov_configure = hisi_qm_sriov_configure,
};
static void sec_register_debugfs(void)


@ -28,12 +28,20 @@ enum hisi_zip_error_type {
HZIP_NC_ERR = 0x0d,
};
struct hisi_zip_dfx {
atomic64_t send_cnt;
atomic64_t recv_cnt;
atomic64_t send_busy_cnt;
atomic64_t err_bd_cnt;
};
struct hisi_zip_ctrl;
struct hisi_zip {
struct hisi_qm qm;
struct list_head list;
struct hisi_zip_ctrl *ctrl;
struct hisi_zip_dfx dfx;
};
struct hisi_zip_sqe {

View File

@ -64,7 +64,6 @@ struct hisi_zip_req_q {
struct hisi_zip_qp_ctx {
struct hisi_qp *qp;
struct hisi_zip_req_q req_q;
struct hisi_acc_sgl_pool *sgl_pool;
struct hisi_zip *zip_dev;
@ -333,6 +332,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
struct hisi_zip_sqe *sqe = data;
struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct hisi_zip_req *req = req_q->q + sqe->tag;
struct acomp_req *acomp_req = req->req;
@ -340,12 +340,14 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
u32 status, dlen, head_size;
int err = 0;
atomic64_inc(&dfx->recv_cnt);
status = sqe->dw3 & HZIP_BD_STATUS_M;
if (status != 0 && status != HZIP_NC_ERR) {
dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
sqe->produced);
atomic64_inc(&dfx->err_bd_cnt);
err = -EIO;
}
dlen = sqe->produced;
@ -484,11 +486,12 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_qp_ctx *qp_ctx)
{
struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
struct hisi_zip_sqe zip_sqe;
dma_addr_t input;
dma_addr_t output;
int ret;
@ -511,15 +514,18 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
}
req->dma_dst = output;
	hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen,
a_req->dlen, req->sskip, req->dskip);
	hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL);
	hisi_zip_config_tag(&zip_sqe, req->req_id);
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
ret = hisi_qp_send(qp, &zip_sqe);
if (ret < 0) {
atomic64_inc(&dfx->send_busy_cnt);
goto err_unmap_output;
}
return -EINPROGRESS;
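The SQE handling change is the point of this hunk: the descriptor moves from qp_ctx, where a single zip_sqe was shared by every request on the queue pair, to a stack-local copy, so concurrent hisi_zip_do_work() calls can no longer overwrite each other's half-built SQE between fill and send. The new dfx counters account each submission (send_cnt) and each failed submit (send_busy_cnt) along the way.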


@ -62,6 +62,7 @@
#define HZIP_CORE_INT_SOURCE 0x3010A0
#define HZIP_CORE_INT_MASK_REG 0x3010A4
#define HZIP_CORE_INT_SET 0x3010A8
#define HZIP_CORE_INT_STATUS 0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
@ -83,8 +84,13 @@
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_WR_PORT BIT(11)
#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
#define HZIP_SQE_MASK_LEN 48
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
@ -95,6 +101,18 @@ struct hisi_zip_hw_error {
const char *msg;
};
struct zip_dfx_item {
const char *name;
u32 offset;
};
static struct zip_dfx_item zip_dfx_files[] = {
{"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
{"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
{"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)},
{"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)},
};
static const struct hisi_zip_hw_error zip_hw_error[] = {
{ .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" },
{ .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
@ -134,7 +152,6 @@ struct ctrl_debug_file {
* Just relevant for PF.
*/
struct hisi_zip_ctrl {
struct hisi_zip *hisi_zip;
struct dentry *debug_root;
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
@ -162,7 +179,7 @@ static const u64 core_offsets[] = {
[HZIP_DECOMP_CORE5] = 0x309000,
};
static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_GET_BD_NUM ", 0x00ull},
{"HZIP_GET_RIGHT_BD ", 0x04ull},
{"HZIP_GET_ERROR_BD ", 0x08ull},
@ -189,38 +206,7 @@ static struct debugfs_reg32 hzip_dfx_regs[] = {
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
@ -232,9 +218,14 @@ static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
static const struct kernel_param_ops vfs_num_ops = {
.set = vfs_num_set,
.get = param_get_int,
};
static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
@ -250,9 +241,9 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num)
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
	void __iomem *base = qm->io_base;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@ -279,7 +270,7 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
	if (qm->use_sva) {
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
} else {
@ -295,10 +286,14 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
return 0;
}
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
u32 val;
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
qm->io_base + HZIP_CORE_INT_MASK_REG);
@ -317,12 +312,24 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
/* enable ZIP block master OOO when m-bit error occur */
val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
val = val | HZIP_AXI_SHUTDOWN_ENABLE;
writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
u32 val;
/* disable ZIP hw error interrupts */
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
/* disable ZIP block master OOO when m-bit error occur */
val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
val = val & ~HZIP_AXI_SHUTDOWN_ENABLE;
writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@ -342,21 +349,20 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
struct hisi_qm *qm = file_to_qm(file);
	u32 vfq_num;
	u32 tmp;
	if (val > qm->vfs_num)
		return -EINVAL;
	/* Calculate curr_qm_qp_num and store */
	if (val == 0) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
		if (val == qm->vfs_num)
			qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
				qm->qp_num - (qm->vfs_num - 1) * vfq_num;
else
qm->debug.curr_qm_qp_num = vfq_num;
}
@ -477,6 +483,27 @@ static const struct file_operations ctrl_debug_fops = {
.write = ctrl_debug_write,
};
static int zip_debugfs_atomic64_set(void *data, u64 val)
{
if (val)
return -EINVAL;
atomic64_set((atomic64_t *)data, 0);
return 0;
}
static int zip_debugfs_atomic64_get(void *data, u64 *val)
{
*val = atomic64_read((atomic64_t *)data);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
zip_debugfs_atomic64_set, "%llu\n");
static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
{
struct hisi_zip *hisi_zip = ctrl->hisi_zip;
@ -508,6 +535,25 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
return 0;
}
static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
struct hisi_zip_dfx *dfx = &zip->dfx;
struct dentry *tmp_dir;
void *data;
int i;
tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root);
for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
debugfs_create_file(zip_dfx_files[i].name,
0644,
tmp_dir,
data,
&zip_atomic64_ops);
}
}
static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
{
int i;
@ -534,6 +580,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
qm->debug.debug_root = dev_d;
ret = hisi_qm_debug_init(qm);
if (ret)
@ -546,6 +594,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
goto failed_to_create;
}
hisi_zip_dfx_debug_init(qm);
return 0;
failed_to_create:
@ -598,8 +648,6 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
err++;
}
}
static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
@ -607,17 +655,55 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
}
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE,
qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
{
u32 nfe_enb;
/* Disable ECC Mbit error report. */
nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
/* Inject zip ECC Mbit error to block master ooo. */
writel(HZIP_CORE_INT_STATUS_M_ECC,
qm->io_base + HZIP_CORE_INT_SET);
}
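The close path works by deliberate fault injection: with the double-bit ECC report masked out of the RAS NFE enable register, writing HZIP_CORE_INT_STATUS_M_ECC into HZIP_CORE_INT_SET raises the ECC-Mbit condition inside the device, which (as the comments say) is what forces the ZIP AXI master into its out-of-order shutdown state without surfacing a fresh non-fatal error during reset.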
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.hw_init = hisi_zip_set_user_domain_and_cache,
.hw_err_enable = hisi_zip_hw_error_enable,
.hw_err_disable = hisi_zip_hw_error_disable,
.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
.log_dev_hw_err = hisi_zip_log_hw_error,
.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE |
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
.msi = QM_DB_RANDOM_INVALID,
.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
.msi_wr_port = HZIP_WR_PORT,
.acpi_rst = "ZRST",
}
};
@ -633,164 +719,35 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl;
ctrl->hisi_zip = hisi_zip;
	if (qm->ver == QM_HW_V1)
		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
	else
		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
qm->err_ini = &hisi_zip_err_ini;
	hisi_zip_set_user_domain_and_cache(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(hisi_zip);
return 0;
}
/* Currently we only support equal assignment */
static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
{
struct hisi_qm *qm = &hisi_zip->qm;
u32 qp_num = qm->qp_num;
u32 q_base = qp_num;
u32 q_num, remain_q_num, i;
int ret;
if (!num_vfs)
return -EINVAL;
remain_q_num = qm->ctrl_qp_num - qp_num;
if (remain_q_num < num_vfs)
return -EINVAL;
q_num = remain_q_num / num_vfs;
for (i = 1; i <= num_vfs; i++) {
if (i == num_vfs)
q_num += remain_q_num % num_vfs;
ret = hisi_qm_set_vft(qm, i, q_base, q_num);
if (ret)
return ret;
q_base += q_num;
}
return 0;
}
static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
{
struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl;
struct hisi_qm *qm = &hisi_zip->qm;
u32 i, num_vfs = ctrl->num_vfs;
int ret;
for (i = 1; i <= num_vfs; i++) {
ret = hisi_qm_set_vft(qm, i, 0, 0);
if (ret)
return ret;
}
ctrl->num_vfs = 0;
return 0;
}
static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
int pre_existing_vfs, num_vfs, ret;
pre_existing_vfs = pci_num_vf(pdev);
if (pre_existing_vfs) {
dev_err(&pdev->dev,
"Can't enable VF. Please disable pre-enabled VFs!\n");
return 0;
}
num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);
ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
if (ret) {
dev_err(&pdev->dev, "Can't assign queues for VF!\n");
return ret;
}
hisi_zip->ctrl->num_vfs = num_vfs;
ret = pci_enable_sriov(pdev, num_vfs);
if (ret) {
dev_err(&pdev->dev, "Can't enable VF!\n");
hisi_zip_clear_vft_config(hisi_zip);
return ret;
}
return num_vfs;
}
static int hisi_zip_sriov_disable(struct pci_dev *pdev)
{
struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
if (pci_vfs_assigned(pdev)) {
dev_err(&pdev->dev,
"Can't disable VFs while VFs are assigned!\n");
return -EPERM;
}
/* remove in hisi_zip_pci_driver will be called to free VF resources */
pci_disable_sriov(pdev);
return hisi_zip_clear_vft_config(hisi_zip);
}
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->algs = "zlib\ngzip";
	qm->sqe_size = HZIP_SQE_SIZE;
	qm->dev_name = hisi_zip_name;
	qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HZIP_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->qm_list = &zip_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * have no way to get qm configure in VM in v1 hardware,
		 * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
		 * to trigger only one VF in v1 hardware.
		 *
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = HZIP_PF_DEF_Q_NUM;
		qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
	}
	return hisi_qm_init(qm);
}
static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
{
struct hisi_qm *qm = &hisi_zip->qm;
int ret;
if (qm->fun_type == QM_HW_PF) {
ret = hisi_zip_pf_probe_init(hisi_zip);
if (ret)
return ret;
}
return 0;
}
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_zip *hisi_zip;
struct hisi_qm *qm;
int ret;
hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
if (!hisi_zip)
return -ENOMEM;
qm = &hisi_zip->qm;
ret = hisi_zip_qm_init(qm, pdev);
if (ret) {
pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret);
return ret;
}
ret = hisi_zip_probe_init(hisi_zip);
if (ret) {
pci_err(pdev, "Failed to probe (%d)!\n", ret);
goto err_qm_uninit;
}
ret = hisi_qm_start(qm);
@ -823,7 +817,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
goto err_remove_from_list;
}
@ -836,15 +830,8 @@ err_remove_from_list:
hisi_qm_stop(qm);
err_qm_uninit:
hisi_qm_uninit(qm);
return ret;
}
static void hisi_zip_remove(struct pci_dev *pdev)
@ -852,8 +839,8 @@ static void hisi_zip_remove(struct pci_dev *pdev)
struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
struct hisi_qm *qm = &hisi_zip->qm;
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev);
hisi_zip_debugfs_exit(hisi_zip);
hisi_qm_stop(qm);
@ -865,6 +852,9 @@ static void hisi_zip_remove(struct pci_dev *pdev)
static const struct pci_error_handlers hisi_zip_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
.slot_reset = hisi_qm_dev_slot_reset,
.reset_prepare = hisi_qm_reset_prepare,
.reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_zip_pci_driver = {
@ -873,7 +863,7 @@ static struct pci_driver hisi_zip_pci_driver = {
.probe = hisi_zip_probe,
.remove = hisi_zip_remove,
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
hisi_zip_sriov_configure : NULL,
hisi_qm_sriov_configure : NULL,
.err_handler = &hisi_zip_err_handler,
};


@ -79,13 +79,13 @@ static int otx_cpt_device_init(struct otx_cpt_device *cpt)
/* Check BIST status */
bist = (u64)otx_cpt_check_bist_status(cpt);
if (bist) {
dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
dev_err(dev, "RAM BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}
bist = otx_cpt_check_exe_bist_status(cpt);
if (bist) {
dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
dev_err(dev, "Engine BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}


@ -63,11 +63,11 @@ static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
if (vf_id >= 0)
pr_debug("MBOX opcode %s received from VF%d raw_data %s",
pr_debug("MBOX opcode %s received from VF%d raw_data %s\n",
get_mbox_opcode_str(mbox_msg->msg), vf_id,
raw_data_str);
else
pr_debug("MBOX opcode %s received from PF raw_data %s",
pr_debug("MBOX opcode %s received from PF raw_data %s\n",
get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
}
@ -140,20 +140,20 @@ static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
struct otx_cpt_ucode *ucode;
if (q >= cpt->max_vfs) {
dev_err(dev, "Requested queue %d is > than maximum avail %d",
dev_err(dev, "Requested queue %d is > than maximum avail %d\n",
q, cpt->max_vfs);
return -EINVAL;
}
if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) {
dev_err(dev, "Requested group %d is > than maximum avail %d",
dev_err(dev, "Requested group %d is > than maximum avail %d\n",
grp, OTX_CPT_MAX_ENGINE_GROUPS);
return -EINVAL;
}
eng_grp = &cpt->eng_grps.grp[grp];
if (!eng_grp->is_enabled) {
dev_err(dev, "Requested engine group %d is disabled", grp);
dev_err(dev, "Requested engine group %d is disabled\n", grp);
return -EINVAL;
}
@ -212,7 +212,7 @@ static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf)
vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
if ((vftype != OTX_CPT_AE_TYPES) &&
(vftype != OTX_CPT_SE_TYPES)) {
dev_err(dev, "VF%d binding to eng group %llu failed",
dev_err(dev, "VF%d binding to eng group %llu failed\n",
vf, mbx.data);
otx_cptpf_mbox_send_nack(cpt, vf, &mbx);
} else {


@ -62,7 +62,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
int i;
if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
dev_err(dev, "unsupported number of engines %d on octeontx",
dev_err(dev, "unsupported number of engines %d on octeontx\n",
eng_grp->g->engs_num);
return bmap;
}
@ -78,7 +78,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
}
if (!found)
dev_err(dev, "No engines reserved for engine group %d",
dev_err(dev, "No engines reserved for engine group %d\n",
eng_grp->idx);
return bmap;
}
@ -306,7 +306,7 @@ static int process_tar_file(struct device *dev,
ucode_size = ntohl(ucode_hdr->code_length) * 2;
if (!ucode_size || (size < round_up(ucode_size, 16) +
sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size", filename);
dev_err(dev, "Ucode %s invalid size\n", filename);
return -EINVAL;
}
@ -379,18 +379,18 @@ static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
{
struct tar_ucode_info_t *curr;
pr_debug("Tar archive filename %s", tar_filename);
pr_debug("Tar archive pointer %p, size %ld", tar_arch->fw->data,
pr_debug("Tar archive filename %s\n", tar_filename);
pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data,
tar_arch->fw->size);
list_for_each_entry(curr, &tar_arch->ucodes, list) {
pr_debug("Ucode filename %s", curr->ucode.filename);
pr_debug("Ucode version string %s", curr->ucode.ver_str);
pr_debug("Ucode version %d.%d.%d.%d",
pr_debug("Ucode filename %s\n", curr->ucode.filename);
pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
pr_debug("Ucode version %d.%d.%d.%d\n",
curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
pr_debug("Ucode type (%d) %s", curr->ucode.type,
pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
get_ucode_type_str(curr->ucode.type));
pr_debug("Ucode size %d", curr->ucode.size);
pr_debug("Ucode size %d\n", curr->ucode.size);
pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
}
}
@ -417,14 +417,14 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
goto release_tar_arch;
if (tar_arch->fw->size < TAR_BLOCK_LEN) {
dev_err(dev, "Invalid tar archive %s ", tar_filename);
dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
tar_size = tar_arch->fw->size;
tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
dev_err(dev, "Unsupported format of tar archive %s",
dev_err(dev, "Unsupported format of tar archive %s\n",
tar_filename);
goto release_tar_arch;
}
@ -437,7 +437,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
if (tar_offs + cur_size > tar_size ||
tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
dev_err(dev, "Invalid tar archive %s ", tar_filename);
dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
@ -458,7 +458,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
/* Check for the end of the archive */
if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
dev_err(dev, "Invalid tar archive %s ", tar_filename);
dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
@ -563,13 +563,13 @@ static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
pr_debug("Ucode info");
pr_debug("Ucode version string %s", ucode->ver_str);
pr_debug("Ucode version %d.%d.%d.%d", ucode->ver_num.nn,
pr_debug("Ucode info\n");
pr_debug("Ucode version string %s\n", ucode->ver_str);
pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
pr_debug("Ucode type %s", get_ucode_type_str(ucode->type));
pr_debug("Ucode size %d", ucode->size);
pr_debug("Ucode virt address %16.16llx", (u64)ucode->align_va);
pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
pr_debug("Ucode size %d\n", ucode->size);
pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}
@ -600,19 +600,19 @@ static void print_dbg_info(struct device *dev,
u32 mask[4];
int i, j;
pr_debug("Engine groups global info");
pr_debug("max SE %d, max AE %d",
pr_debug("Engine groups global info\n");
pr_debug("max SE %d, max AE %d\n",
eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
pr_debug("free SE %d", eng_grps->avail.se_cnt);
pr_debug("free AE %d", eng_grps->avail.ae_cnt);
pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
pr_debug("engine_group%d, state %s", i, grp->is_enabled ?
pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
"enabled" : "disabled");
if (grp->is_enabled) {
mirrored_grp = &eng_grps->grp[grp->mirror.idx];
pr_debug("Ucode0 filename %s, version %s",
pr_debug("Ucode0 filename %s, version %s\n",
grp->mirror.is_ena ?
mirrored_grp->ucode[0].filename :
grp->ucode[0].filename,
@ -626,18 +626,18 @@ static void print_dbg_info(struct device *dev,
if (engs->type) {
print_engs_info(grp, engs_info,
2*OTX_CPT_UCODE_NAME_LENGTH, j);
pr_debug("Slot%d: %s", j, engs_info);
pr_debug("Slot%d: %s\n", j, engs_info);
bitmap_to_arr32(mask, engs->bmap,
eng_grps->engs_num);
pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
mask[3], mask[2], mask[1], mask[0]);
} else
pr_debug("Slot%d not used", j);
pr_debug("Slot%d not used\n", j);
}
if (grp->is_enabled) {
cpt_print_engines_mask(grp, dev, engs_mask,
OTX_CPT_UCODE_NAME_LENGTH);
pr_debug("Cmask: %s", engs_mask);
pr_debug("Cmask: %s\n", engs_mask);
}
}
}
@ -766,7 +766,7 @@ static int check_engines_availability(struct device *dev,
if (avail_cnt < req_eng->count) {
dev_err(dev,
"Error available %s engines %d < than requested %d",
"Error available %s engines %d < than requested %d\n",
get_eng_type_str(req_eng->type),
avail_cnt, req_eng->count);
return -EBUSY;
@ -867,7 +867,7 @@ static int copy_ucode_to_dma_mem(struct device *dev,
OTX_CPT_UCODE_ALIGNMENT,
&ucode->dma, GFP_KERNEL);
if (!ucode->va) {
dev_err(dev, "Unable to allocate space for microcode");
dev_err(dev, "Unable to allocate space for microcode\n");
return -ENOMEM;
}
ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
@ -905,15 +905,15 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
ucode->size = ntohl(ucode_hdr->code_length) * 2;
if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size", ucode_filename);
dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
ret = -EINVAL;
goto release_fw;
}
ret = get_ucode_type(ucode_hdr, &ucode->type);
if (ret) {
dev_err(dev, "Microcode %s unknown type 0x%x", ucode->filename,
ucode->type);
dev_err(dev, "Microcode %s unknown type 0x%x\n",
ucode->filename, ucode->type);
goto release_fw;
}
@ -1083,7 +1083,7 @@ static int eng_grp_update_masks(struct device *dev,
break;
default:
dev_err(dev, "Invalid engine type %d", engs->type);
dev_err(dev, "Invalid engine type %d\n", engs->type);
return -EINVAL;
}
@ -1142,13 +1142,14 @@ static int delete_engine_group(struct device *dev,
return -EINVAL;
if (eng_grp->mirror.ref_count) {
dev_err(dev, "Can't delete engine_group%d as it is used by:",
dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
eng_grp->idx);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
if (eng_grp->g->grp[i].mirror.is_ena &&
eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
dev_err(dev, "engine_group%d", i);
pr_cont(" %d", i);
}
pr_cont("\n");
return -EINVAL;
}
@ -1182,7 +1183,7 @@ static int validate_1_ucode_scenario(struct device *dev,
if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
engs[i].type)) {
dev_err(dev,
"Microcode %s does not support %s engines",
"Microcode %s does not support %s engines\n",
eng_grp->ucode[0].filename,
get_eng_type_str(engs[i].type));
return -EINVAL;
@ -1220,7 +1221,7 @@ static int create_engine_group(struct device *dev,
/* Validate if requested engine types are supported by this device */
for (i = 0; i < engs_cnt; i++)
if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
dev_err(dev, "Device does not support %s engines",
dev_err(dev, "Device does not support %s engines\n",
get_eng_type_str(engs[i].type));
return -EPERM;
}
@ -1228,7 +1229,7 @@ static int create_engine_group(struct device *dev,
/* Find engine group which is not used */
eng_grp = find_unused_eng_grp(eng_grps);
if (!eng_grp) {
dev_err(dev, "Error all engine groups are being used");
dev_err(dev, "Error all engine groups are being used\n");
return -ENOSPC;
}
@ -1298,11 +1299,11 @@ static int create_engine_group(struct device *dev,
eng_grp->is_enabled = true;
if (eng_grp->mirror.is_ena)
dev_info(dev,
"Engine_group%d: reuse microcode %s from group %d",
"Engine_group%d: reuse microcode %s from group %d\n",
eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
mirrored_eng_grp->idx);
else
dev_info(dev, "Engine_group%d: microcode loaded %s",
dev_info(dev, "Engine_group%d: microcode loaded %s\n",
eng_grp->idx, eng_grp->ucode[0].ver_str);
return 0;
@ -1412,14 +1413,14 @@ static ssize_t ucode_load_store(struct device *dev,
} else {
if (del_grp_idx < 0 ||
del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
dev_err(dev, "Invalid engine group index %d",
dev_err(dev, "Invalid engine group index %d\n",
del_grp_idx);
ret = -EINVAL;
return ret;
}
if (!eng_grps->grp[del_grp_idx].is_enabled) {
dev_err(dev, "Error engine_group%d is not configured",
dev_err(dev, "Error engine_group%d is not configured\n",
del_grp_idx);
ret = -EINVAL;
return ret;
@ -1568,7 +1569,7 @@ void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
udelay(CSR_DELAY);
reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
if (timeout--) {
dev_warn(&cpt->pdev->dev, "Cores still busy");
dev_warn(&cpt->pdev->dev, "Cores still busy\n");
break;
}
}
@ -1626,7 +1627,7 @@ int otx_cpt_init_eng_grps(struct pci_dev *pdev,
eng_grps->avail.max_ae_cnt;
if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
dev_err(&pdev->dev,
"Number of engines %d > than max supported %d",
"Number of engines %d > than max supported %d\n",
eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
ret = -EINVAL;
goto err;
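
Most of the otx_cptpf_ucode.c hunks above simply terminate log messages with "\n". A printk record without a trailing newline is left open, so it can be held back or merged with whatever is printed next; pr_cont() is the explicit way to continue such a line, which is exactly what the delete_engine_group() hunk switches to. A minimal sketch of the two styles (messages adapted, not verbatim from this driver):

	/* self-contained record, terminated immediately */
	dev_err(dev, "engine_group%d is busy\n", idx);

	/* deliberately open record, finished with pr_cont() */
	dev_err(dev, "used by engine_group(s):");
	pr_cont(" %d", i);
	pr_cont("\n");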

View File

@ -1660,7 +1660,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
case OTX_CPT_SE_TYPES:
count = atomic_read(&se_devices.count);
if (count >= CPT_MAX_VF_NUM) {
dev_err(&pdev->dev, "No space to add a new device");
dev_err(&pdev->dev, "No space to add a new device\n");
ret = -ENOSPC;
goto err;
}
@ -1687,7 +1687,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
case OTX_CPT_AE_TYPES:
count = atomic_read(&ae_devices.count);
if (count >= CPT_MAX_VF_NUM) {
dev_err(&pdev->dev, "No space to a add new device");
dev_err(&pdev->dev, "No space to a add new device\n");
ret = -ENOSPC;
goto err;
}
@ -1728,7 +1728,7 @@ void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
}
if (!dev_found) {
dev_err(&pdev->dev, "%s device not found", __func__);
dev_err(&pdev->dev, "%s device not found\n", __func__);
goto exit;
}

View File

@ -584,7 +584,7 @@ static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
cptvf_write_vq_done_ack(cptvf, intr);
wqe = get_cptvf_vq_wqe(cptvf, 0);
if (unlikely(!wqe)) {
dev_err(&pdev->dev, "No work to schedule for VF (%d)",
dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
cptvf->vfid);
return IRQ_NONE;
}
@ -602,7 +602,7 @@ static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
GFP_KERNEL)) {
dev_err(&pdev->dev,
"Allocation failed for affinity_mask for VF %d",
"Allocation failed for affinity_mask for VF %d\n",
cptvf->vfid);
return;
}
@ -691,7 +691,7 @@ static ssize_t vf_engine_group_store(struct device *dev,
return -EINVAL;
if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
dev_err(dev, "Engine group >= than max available groups %d",
dev_err(dev, "Engine group >= than max available groups %d\n",
OTX_CPT_MAX_ENGINE_GROUPS);
return -EINVAL;
}
@ -837,7 +837,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev,
cptvf_misc_intr_handler, 0, "CPT VF misc intr",
cptvf);
if (err) {
dev_err(dev, "Failed to request misc irq");
dev_err(dev, "Failed to request misc irq\n");
goto free_vectors;
}
@ -854,7 +854,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev,
cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
if (err) {
dev_err(dev, "cptvf_sw_init() failed");
dev_err(dev, "cptvf_sw_init() failed\n");
goto free_misc_irq;
}
/* Convey VQ LEN to PF */
@ -946,7 +946,7 @@ static void otx_cptvf_remove(struct pci_dev *pdev)
/* Convey DOWN to PF */
if (otx_cptvf_send_vf_down(cptvf)) {
dev_err(&pdev->dev, "PF not responding to DOWN msg");
dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
} else {
sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);

View File

@ -314,7 +314,7 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
GFP_ATOMIC;
ret = setup_sgio_list(pdev, &info, req, gfp);
if (unlikely(ret)) {
dev_err(&pdev->dev, "Setting up SG list failed");
dev_err(&pdev->dev, "Setting up SG list failed\n");
goto request_cleanup;
}
cpt_req->dlen = info->dlen;
@ -410,17 +410,17 @@ int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
if (!otx_cpt_device_ready(cptvf)) {
dev_err(&pdev->dev, "CPT Device is not ready");
dev_err(&pdev->dev, "CPT Device is not ready\n");
return -ENODEV;
}
if ((cptvf->vftype == OTX_CPT_SE_TYPES) && (!req->ctrl.s.se_req)) {
dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n",
cptvf->vfid);
return -EINVAL;
} else if ((cptvf->vftype == OTX_CPT_AE_TYPES) &&
(req->ctrl.s.se_req)) {
dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n",
cptvf->vfid);
return -EINVAL;
}
@ -461,7 +461,7 @@ static int cpt_process_ccode(struct pci_dev *pdev,
/* check for timeout */
if (time_after_eq(jiffies, cpt_info->time_in +
OTX_CPT_COMMAND_TIMEOUT * HZ))
dev_warn(&pdev->dev, "Request timed out 0x%p", req);
dev_warn(&pdev->dev, "Request timed out 0x%p\n", req);
else if (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) {
cpt_info->time_in = jiffies;
cpt_info->extra_time++;

View File

@ -805,12 +805,9 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
size_t ds = crypto_shash_digestsize(bctx->shash);
int err, i;
SHASH_DESC_ON_STACK(shash, bctx->shash);
shash->tfm = bctx->shash;
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
bctx->ipad);
if (err)
return err;
keylen = ds;
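
The mtk_sha_setkey() hunk above is the first of many conversions in this section to crypto_shash_tfm_digest(), the helper introduced by this pull request for one-shot digests over a bare tfm. Its implementation in crypto/shash.c is, roughly, the very pattern the callers used to open-code:

	int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
				    unsigned int len, u8 *out)
	{
		SHASH_DESC_ON_STACK(desc, tfm);
		int err;

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
		return err;
	}

Converted callers shed the on-stack descriptor from their own stack frame and get the shash_desc_zero() cleanup for free, something several of the sites below (s3fwrn5, fscrypt's hkdf, ecryptfs, nfsd) previously had to remember themselves.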

View File

@ -462,7 +462,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child_shash = ctx->child_shash;
struct crypto_ahash *fallback_tfm;
SHASH_DESC_ON_STACK(shash, child_shash);
int err, bs, ds;
fallback_tfm = ctx->base.fallback_tfm;
@ -470,14 +469,12 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
if (err)
return err;
shash->tfm = child_shash;
bs = crypto_shash_blocksize(child_shash);
ds = crypto_shash_digestsize(child_shash);
BUG_ON(ds > N2_HASH_KEY_MAX);
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen,
ctx->hash_key);
err = crypto_shash_tfm_digest(child_shash, key, keylen,
ctx->hash_key);
if (err)
return err;
keylen = ds;

View File

@ -33,7 +33,6 @@
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
@ -1245,16 +1244,6 @@ static int omap_sham_update(struct ahash_request *req)
return omap_sham_enqueue(req, OP_UPDATE);
}
static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
const u8 *data, unsigned int len, u8 *out)
{
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
return crypto_shash_digest(shash, data, len, out);
}
static int omap_sham_final_shash(struct ahash_request *req)
{
struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
@ -1270,9 +1259,8 @@ static int omap_sham_final_shash(struct ahash_request *req)
!test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
offset = get_block_size(ctx);
return omap_sham_shash_digest(tctx->fallback, req->base.flags,
ctx->buffer + offset,
ctx->bufcnt - offset, req->result);
return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
ctx->bufcnt - offset, req->result);
}
static int omap_sham_final(struct ahash_request *req)
@ -1351,9 +1339,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
return err;
if (keylen > bs) {
err = omap_sham_shash_digest(bctx->shash,
crypto_shash_get_flags(bctx->shash),
key, keylen, bctx->ipad);
err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
bctx->ipad);
if (err)
return err;
keylen = ds;

View File

@ -1520,37 +1520,6 @@ static int s5p_hash_update(struct ahash_request *req)
return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
}
/**
* s5p_hash_shash_digest() - calculate shash digest
* @tfm: crypto transformation
* @flags: tfm flags
* @data: input data
* @len: length of data
* @out: output buffer
*/
static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
const u8 *data, unsigned int len, u8 *out)
{
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
return crypto_shash_digest(shash, data, len, out);
}
/**
* s5p_hash_final_shash() - calculate shash digest
* @req: AHASH request
*/
static int s5p_hash_final_shash(struct ahash_request *req)
{
struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
ctx->buffer, ctx->bufcnt, req->result);
}
/**
* s5p_hash_final() - close up hash and calculate digest
* @req: AHASH request
@ -1582,8 +1551,12 @@ static int s5p_hash_final(struct ahash_request *req)
if (ctx->error)
return -EINVAL; /* uncompleted hash is not needed */
if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
return s5p_hash_final_shash(req);
if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
ctx->bufcnt, req->result);
}
return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
}

View File

@ -28,18 +28,23 @@
/* Registers values */
#define CRC_CR_RESET BIT(0)
#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
#define CRC_INIT_DEFAULT 0xFFFFFFFF
#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
#define CRC_CR_REV_IN_BYTE BIT(5)
#define CRC_CR_REV_OUT BIT(7)
#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
#define CRC_AUTOSUSPEND_DELAY 50
static unsigned int burst_size;
module_param(burst_size, uint, 0644);
MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
struct stm32_crc {
struct list_head list;
struct device *dev;
void __iomem *regs;
struct clk *clk;
u8 pending_data[sizeof(u32)];
size_t nb_pending_bytes;
spinlock_t lock;
};
struct stm32_crc_list {
@ -59,14 +64,13 @@ struct stm32_crc_ctx {
struct stm32_crc_desc_ctx {
u32 partial; /* crc32c: partial in first 4 bytes of that struct */
struct stm32_crc *crc;
};
static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
mctx->key = 0;
mctx->poly = CRC32_POLY_LE;
return 0;
}
@ -75,7 +79,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
mctx->key = CRC32C_INIT_DEFAULT;
mctx->poly = CRC32C_POLY_LE;
return 0;
}
@ -92,32 +96,109 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
return 0;
}
static struct stm32_crc *stm32_crc_get_next_crc(void)
{
struct stm32_crc *crc;
spin_lock_bh(&crc_list.lock);
crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
if (crc)
list_move_tail(&crc->list, &crc_list.dev_list);
spin_unlock_bh(&crc_list.lock);
return crc;
}
static int stm32_crc_init(struct shash_desc *desc)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
unsigned long flags;
spin_lock_bh(&crc_list.lock);
list_for_each_entry(crc, &crc_list.dev_list, list) {
ctx->crc = crc;
break;
}
spin_unlock_bh(&crc_list.lock);
crc = stm32_crc_get_next_crc();
if (!crc)
return -ENODEV;
pm_runtime_get_sync(ctx->crc->dev);
pm_runtime_get_sync(crc->dev);
spin_lock_irqsave(&crc->lock, flags);
/* Reset, set key, poly and configure in bit reverse mode */
writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
/* Store partial result */
ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
ctx->crc->nb_pending_bytes = 0;
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
pm_runtime_mark_last_busy(ctx->crc->dev);
pm_runtime_put_autosuspend(ctx->crc->dev);
spin_unlock_irqrestore(&crc->lock, flags);
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
return 0;
}
static int burst_update(struct shash_desc *desc, const u8 *d8,
size_t length)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
unsigned long flags;
crc = stm32_crc_get_next_crc();
if (!crc)
return -ENODEV;
pm_runtime_get_sync(crc->dev);
spin_lock_irqsave(&crc->lock, flags);
/*
* Restore previously calculated CRC for this context as init value
* Restore polynomial configuration
* Configure in register for word input data,
* Configure out register in reversed bit mode data.
*/
writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
/* Configure for byte data */
writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
writeb_relaxed(*d8++, crc->regs + CRC_DR);
length--;
}
/* Configure for word data */
writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
}
for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
if (length) {
/* Configure for byte data */
writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
crc->regs + CRC_CR);
while (length--)
writeb_relaxed(*d8++, crc->regs + CRC_DR);
}
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
spin_unlock_irqrestore(&crc->lock, flags);
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
return 0;
}
@ -125,55 +206,26 @@ static int stm32_crc_init(struct shash_desc *desc)
static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
unsigned int length)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc *crc = ctx->crc;
u32 *d32;
unsigned int i;
const unsigned int burst_sz = burst_size;
unsigned int rem_sz;
const u8 *cur;
size_t size;
int ret;
pm_runtime_get_sync(crc->dev);
if (!burst_sz)
return burst_update(desc, d8, length);
if (unlikely(crc->nb_pending_bytes)) {
while (crc->nb_pending_bytes != sizeof(u32) && length) {
/* Fill in pending data */
crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
length--;
}
if (crc->nb_pending_bytes == sizeof(u32)) {
/* Process completed pending data */
writel_relaxed(*(u32 *)crc->pending_data,
crc->regs + CRC_DR);
crc->nb_pending_bytes = 0;
}
/* Digest first bytes not 32bit aligned at first pass in the loop */
size = min(length,
burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8,
sizeof(u32)));
for (rem_sz = length, cur = d8; rem_sz;
rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
ret = burst_update(desc, cur, size);
if (ret)
return ret;
}
d32 = (u32 *)d8;
for (i = 0; i < length >> 2; i++)
/* Process 32 bits data */
writel_relaxed(*(d32++), crc->regs + CRC_DR);
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
/* Check for pending data (non 32 bits) */
length &= 3;
if (likely(!length))
return 0;
if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
/* Shall not happen */
dev_err(crc->dev, "Pending data overflow\n");
return -EINVAL;
}
d8 = (const u8 *)d32;
for (i = 0; i < length; i++)
/* Store pending data */
crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
return 0;
}
@ -202,6 +254,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
}
static unsigned int refcnt;
static DEFINE_MUTEX(refcnt_lock);
static struct shash_alg algs[] = {
/* CRC-32 */
{
@ -284,20 +338,29 @@ static int stm32_crc_probe(struct platform_device *pdev)
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_irq_safe(dev);
pm_runtime_enable(dev);
spin_lock_init(&crc->lock);
platform_set_drvdata(pdev, crc);
spin_lock(&crc_list.lock);
list_add(&crc->list, &crc_list.dev_list);
spin_unlock(&crc_list.lock);
ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
if (ret) {
dev_err(dev, "Failed to register\n");
clk_disable_unprepare(crc->clk);
return ret;
mutex_lock(&refcnt_lock);
if (!refcnt) {
ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
if (ret) {
mutex_unlock(&refcnt_lock);
dev_err(dev, "Failed to register\n");
clk_disable_unprepare(crc->clk);
return ret;
}
}
refcnt++;
mutex_unlock(&refcnt_lock);
dev_info(dev, "Initialized\n");
@ -318,7 +381,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
list_del(&crc->list);
spin_unlock(&crc_list.lock);
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
mutex_lock(&refcnt_lock);
if (!--refcnt)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
mutex_unlock(&refcnt_lock);
pm_runtime_disable(crc->dev);
pm_runtime_put_noidle(crc->dev);
@ -328,34 +394,60 @@ static int stm32_crc_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM
static int stm32_crc_runtime_suspend(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
clk_disable_unprepare(crc->clk);
return 0;
}
static int stm32_crc_runtime_resume(struct device *dev)
static int __maybe_unused stm32_crc_suspend(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(crc->clk);
ret = pm_runtime_force_suspend(dev);
if (ret)
return ret;
clk_unprepare(crc->clk);
return 0;
}
static int __maybe_unused stm32_crc_resume(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
int ret;
ret = clk_prepare(crc->clk);
if (ret) {
dev_err(crc->dev, "Failed to prepare_enable clock\n");
dev_err(crc->dev, "Failed to prepare clock\n");
return ret;
}
return pm_runtime_force_resume(dev);
}
static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
clk_disable(crc->clk);
return 0;
}
static int __maybe_unused stm32_crc_runtime_resume(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
int ret;
ret = clk_enable(crc->clk);
if (ret) {
dev_err(crc->dev, "Failed to enable clock\n");
return ret;
}
return 0;
}
#endif
static const struct dev_pm_ops stm32_crc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend,
stm32_crc_resume)
SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
stm32_crc_runtime_resume, NULL)
};
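
Taken together, the stm32_crc changes make the driver safe for concurrent users: each instance gets a spinlock, stm32_crc_get_next_crc() round-robins requests across instances via list_move_tail(), the per-device pending_data buffer is gone (burst_update() now feeds unaligned head and tail bytes in byte mode), and the shash algorithms are registered only once behind a refcount even when several CRC units probe. The new burst_size module parameter bounds how long the spinlock is held by splitting large updates into bursts, and seeding "crc32" with 0 instead of 0xFFFFFFFF matches the crc32-generic software default. A client-side usage sketch (the helper name crc32c_oneshot is invented; the crypto core picks whichever registered crc32c provider has highest priority):

	#include <crypto/hash.h>

	static int crc32c_oneshot(const u8 *data, unsigned int len, u8 *digest)
	{
		struct crypto_shash *tfm;
		u32 seed = 0xFFFFFFFF;	/* crc32c convention; "crc32" defaults to 0 */
		int err;

		tfm = crypto_alloc_shash("crc32c", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_shash_setkey(tfm, (u8 *)&seed, sizeof(seed));
		if (!err)
			/* 4-byte little-endian result */
			err = crypto_shash_tfm_digest(tfm, data, len, digest);

		crypto_free_shash(tfm);
		return err;
	}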

View File

@ -507,6 +507,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
struct dma_slave_config dma_conf;
struct dma_chan *chan;
int err;
memset(&dma_conf, 0, sizeof(dma_conf));
@ -518,11 +519,11 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.dst_maxburst = hdev->dma_maxburst;
dma_conf.device_fc = false;
hdev->dma_lch = dma_request_chan(hdev->dev, "in");
if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
return PTR_ERR(hdev->dma_lch);
}
chan = dma_request_chan(hdev->dev, "in");
if (IS_ERR(chan))
return PTR_ERR(chan);
hdev->dma_lch = chan;
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
if (err) {
@ -1463,8 +1464,11 @@ static int stm32_hash_probe(struct platform_device *pdev)
hdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hdev->clk)) {
dev_err(dev, "failed to get clock for hash (%lu)\n",
PTR_ERR(hdev->clk));
if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) {
dev_err(dev, "failed to get clock for hash (%lu)\n",
PTR_ERR(hdev->clk));
}
return PTR_ERR(hdev->clk);
}
@ -1482,7 +1486,12 @@ static int stm32_hash_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
if (!IS_ERR(hdev->rst)) {
if (IS_ERR(hdev->rst)) {
if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_reset;
}
} else {
reset_control_assert(hdev->rst);
udelay(2);
reset_control_deassert(hdev->rst);
@ -1493,8 +1502,15 @@ static int stm32_hash_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hdev);
ret = stm32_hash_dma_init(hdev);
if (ret)
switch (ret) {
case 0:
break;
case -ENOENT:
dev_dbg(dev, "DMA mode not available\n");
break;
default:
goto err_dma;
}
spin_lock(&stm32_hash.lock);
list_add_tail(&hdev->list, &stm32_hash.dev_list);
@ -1532,10 +1548,10 @@ err_engine:
spin_lock(&stm32_hash.lock);
list_del(&hdev->list);
spin_unlock(&stm32_hash.lock);
err_dma:
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
err_reset:
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
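
The stm32-hash probe fixes follow the usual deferral etiquette: devm_clk_get(), devm_reset_control_get() and dma_request_chan() can all return -EPROBE_DEFER, so error prints are suppressed in that case, and -ENOENT from DMA setup is downgraded to a debug message meaning "no DMA configured, fall back to IRQ mode". As an aside, not part of this series: kernels released after this merge added dev_err_probe(), which collapses the same check into one call, roughly:

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk))
		return dev_err_probe(dev, PTR_ERR(hdev->clk),
				     "failed to get clock for hash\n");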

View File

@ -434,15 +434,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
goto out;
}
{
SHASH_DESC_ON_STACK(desc, tfm);
desc->tfm = tfm;
ret = crypto_shash_digest(desc, fw->image, image_size,
hash_data);
shash_desc_zero(desc);
}
ret = crypto_shash_tfm_digest(tfm, fw->image, image_size, hash_data);
crypto_free_shash(tfm);
if (ret) {

View File

@ -83,13 +83,8 @@ static int fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result)
tfm = prev_tfm;
}
}
{
SHASH_DESC_ON_STACK(desc, tfm);
desc->tfm = tfm;
return crypto_shash_digest(desc, data, data_len, result);
}
return crypto_shash_tfm_digest(tfm, data, data_len, result);
}
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)

View File

@ -44,17 +44,13 @@ static int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
unsigned int ikmlen, u8 prk[HKDF_HASHLEN])
{
static const u8 default_salt[HKDF_HASHLEN];
SHASH_DESC_ON_STACK(desc, hmac_tfm);
int err;
err = crypto_shash_setkey(hmac_tfm, default_salt, HKDF_HASHLEN);
if (err)
return err;
desc->tfm = hmac_tfm;
err = crypto_shash_digest(desc, ikm, ikmlen, prk);
shash_desc_zero(desc);
return err;
return crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk);
}
/*

View File

@ -48,18 +48,6 @@ void ecryptfs_from_hex(char *dst, char *src, int dst_size)
}
}
static int ecryptfs_hash_digest(struct crypto_shash *tfm,
char *src, int len, char *dst)
{
SHASH_DESC_ON_STACK(desc, tfm);
int err;
desc->tfm = tfm;
err = crypto_shash_digest(desc, src, len, dst);
shash_desc_zero(desc);
return err;
}
/**
* ecryptfs_calculate_md5 - calculates the md5 of @src
* @dst: Pointer to 16 bytes of allocated memory
@ -74,11 +62,8 @@ static int ecryptfs_calculate_md5(char *dst,
struct ecryptfs_crypt_stat *crypt_stat,
char *src, int len)
{
struct crypto_shash *tfm;
int rc = 0;
int rc = crypto_shash_tfm_digest(crypt_stat->hash_tfm, src, len, dst);
tfm = crypt_stat->hash_tfm;
rc = ecryptfs_hash_digest(tfm, src, len, dst);
if (rc) {
printk(KERN_ERR
"%s: Error computing crypto hash; rc = [%d]\n",

View File

@ -12,7 +12,6 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/cryptohash.h>
#include <linux/pagemap.h>
#include <linux/unicode.h>

View File

@ -127,16 +127,8 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
goto out;
}
{
SHASH_DESC_ON_STACK(desc, tfm);
desc->tfm = tfm;
status = crypto_shash_digest(desc, clname->data, clname->len,
cksum.data);
shash_desc_zero(desc);
}
status = crypto_shash_tfm_digest(tfm, clname->data, clname->len,
cksum.data);
if (status)
goto out;
@ -1148,7 +1140,6 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
struct crypto_shash *tfm = cn->cn_tfm;
struct xdr_netobj cksum;
char *principal = NULL;
SHASH_DESC_ON_STACK(desc, tfm);
/* Don't upcall if it's already stored */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
@ -1170,16 +1161,14 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
else if (clp->cl_cred.cr_principal)
principal = clp->cl_cred.cr_principal;
if (principal) {
desc->tfm = tfm;
cksum.len = crypto_shash_digestsize(tfm);
cksum.data = kmalloc(cksum.len, GFP_KERNEL);
if (cksum.data == NULL) {
ret = -ENOMEM;
goto out;
}
ret = crypto_shash_digest(desc, principal, strlen(principal),
cksum.data);
shash_desc_zero(desc);
ret = crypto_shash_tfm_digest(tfm, principal, strlen(principal),
cksum.data);
if (ret) {
kfree(cksum.data);
goto out;
@ -1343,7 +1332,6 @@ nfsd4_cld_check_v2(struct nfs4_client *clp)
struct crypto_shash *tfm = cn->cn_tfm;
struct xdr_netobj cksum;
char *principal = NULL;
SHASH_DESC_ON_STACK(desc, tfm);
/* did we already find that this client is stable? */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
@ -1381,14 +1369,12 @@ found:
principal = clp->cl_cred.cr_principal;
if (principal == NULL)
return -ENOENT;
desc->tfm = tfm;
cksum.len = crypto_shash_digestsize(tfm);
cksum.data = kmalloc(cksum.len, GFP_KERNEL);
if (cksum.data == NULL)
return -ENOENT;
status = crypto_shash_digest(desc, principal, strlen(principal),
cksum.data);
shash_desc_zero(desc);
status = crypto_shash_tfm_digest(tfm, principal,
strlen(principal), cksum.data);
if (status) {
kfree(cksum.data);
return -ENOENT;

View File

@ -31,15 +31,9 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node,
u8 *hash)
{
const struct ubifs_ch *ch = node;
SHASH_DESC_ON_STACK(shash, c->hash_tfm);
int err;
shash->tfm = c->hash_tfm;
err = crypto_shash_digest(shash, node, le32_to_cpu(ch->len), hash);
if (err < 0)
return err;
return 0;
return crypto_shash_tfm_digest(c->hash_tfm, node, le32_to_cpu(ch->len),
hash);
}
/**
@ -53,15 +47,7 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node,
static int ubifs_hash_calc_hmac(const struct ubifs_info *c, const u8 *hash,
u8 *hmac)
{
SHASH_DESC_ON_STACK(shash, c->hmac_tfm);
int err;
shash->tfm = c->hmac_tfm;
err = crypto_shash_digest(shash, hash, c->hash_len, hmac);
if (err < 0)
return err;
return 0;
return crypto_shash_tfm_digest(c->hmac_tfm, hash, c->hash_len, hmac);
}
/**

View File

@ -68,12 +68,9 @@ static int mst_node_check_hash(const struct ubifs_info *c,
u8 calc[UBIFS_MAX_HASH_LEN];
const void *node = mst;
SHASH_DESC_ON_STACK(shash, c->hash_tfm);
shash->tfm = c->hash_tfm;
crypto_shash_digest(shash, node + sizeof(struct ubifs_ch),
UBIFS_MST_NODE_SZ - sizeof(struct ubifs_ch), calc);
crypto_shash_tfm_digest(c->hash_tfm, node + sizeof(struct ubifs_ch),
UBIFS_MST_NODE_SZ - sizeof(struct ubifs_ch),
calc);
if (ubifs_check_hash(c, expected, calc))
return -EPERM;

View File

@ -558,7 +558,7 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
return data == 0xFFFFFFFF;
}
/* authenticate_sleb_hash and authenticate_sleb_hmac are split out for stack usage */
/* authenticate_sleb_hash is split out for stack usage */
static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash)
{
SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
@ -569,15 +569,6 @@ static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_h
return crypto_shash_final(hash_desc, hash);
}
static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac)
{
SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
hmac_desc->tfm = c->hmac_tfm;
return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac);
}
/**
* authenticate_sleb - authenticate one scan LEB
* @c: UBIFS file-system description object
@ -618,7 +609,8 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
if (err)
goto out;
err = authenticate_sleb_hmac(c, hash, hmac);
err = crypto_shash_tfm_digest(c->hmac_tfm, hash,
c->hash_len, hmac);
if (err)
goto out;
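
The ubifs change is the flip side of the same crypto_shash_tfm_digest() conversion: authenticate_sleb_hmac() existed only to keep a second on-stack descriptor out of authenticate_sleb()'s frame, and the new helper now serves that purpose. The stack cost being avoided is real, since SHASH_DESC_ON_STACK() reserves the worst-case descriptor size; around this release it expands to roughly the following (from include/crypto/hash.h; exact sizes and alignment attributes vary between versions):

	#define HASH_MAX_DESCSIZE	(sizeof(struct shash_desc) + 360)

	#define SHASH_DESC_ON_STACK(shash, ctx)				  \
		char __##shash##_desc[sizeof(struct shash_desc) +	  \
			HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR;	  \
		struct shash_desc *shash = (struct shash_desc *)__##shash##_desc

	/* i.e. several hundred bytes of stack per descriptor */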

Some files were not shown because too many files have changed in this diff.