/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Crypto engine API
 *
 * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
 */
#ifndef _CRYPTO_ENGINE_H
#define _CRYPTO_ENGINE_H
#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include <crypto/skcipher.h>
#include <linux/types.h>
struct crypto_engine;
struct device;
/*
 * struct crypto_engine_op - crypto hardware engine operations
 * @do_one_request: do encryption for current request
 */
struct crypto_engine_op {
	/* Process one queued request; @areq is the type-erased request. */
	int (*do_one_request)(struct crypto_engine *engine,
			      void *areq);
};
struct aead_engine_alg {
|
|
|
|
struct aead_alg base;
|
|
|
|
struct crypto_engine_op op;
|
|
|
|
};
struct ahash_engine_alg {
|
|
|
|
struct ahash_alg base;
|
|
|
|
struct crypto_engine_op op;
|
|
|
|
};
struct akcipher_engine_alg {
|
|
|
|
struct akcipher_alg base;
|
|
|
|
struct crypto_engine_op op;
|
|
|
|
};
struct kpp_engine_alg {
|
|
|
|
struct kpp_alg base;
|
|
|
|
struct crypto_engine_op op;
|
|
|
|
};
struct skcipher_engine_alg {
|
|
|
|
struct skcipher_alg base;
|
|
|
|
struct crypto_engine_op op;
|
|
|
|
};
/*
 * Queueing helpers: hand a request of the given type over to the engine
 * for asynchronous processing. Return value follows kernel convention
 * (0 / -errno or -EINPROGRESS-style transfer status).
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req);
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req);
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req);
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req);
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req);

/*
 * Completion helpers: mark a previously transferred request as finished,
 * propagating @err to the request's completion callback.
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err);
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err);
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err);
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err);
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err);

/* Engine lifecycle: start/stop request processing, allocate an engine. */
int crypto_engine_start(struct crypto_engine *engine);
int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
/*
 * Allocate and initialise a crypto engine with explicit settings.
 * @retry_support: hardware can retry/requeue requests, enabling multiple
 *                 in-flight requests (false keeps legacy one-by-one mode)
 * @cbk_do_batch: optional callback to execute a batch of requests
 * @rt: NOTE(review): presumably runs the engine's pump thread with
 *      realtime priority, mirroring crypto_engine_alloc_init() — confirm
 * @qlen: maximum size of the engine's software request queue
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen);
void crypto_engine_exit(struct crypto_engine *engine);
/*
 * Registration helpers: register/unregister engine-backed algorithm
 * wrappers with the crypto subsystem; plural variants handle arrays of
 * @count algorithms.
 */
int crypto_engine_register_aead(struct aead_engine_alg *alg);
void crypto_engine_unregister_aead(struct aead_engine_alg *alg);
int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count);
void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count);

int crypto_engine_register_ahash(struct ahash_engine_alg *alg);
void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg);
int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count);
void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
				      int count);

int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg);
void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg);

int crypto_engine_register_kpp(struct kpp_engine_alg *alg);
void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg);

int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg);
void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg);
int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
				     int count);
void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
					int count);
#endif /* _CRYPTO_ENGINE_H */