From ab9a244c396aae4aaa34b2399b82fc15ec2df8c1 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Mon, 8 Jul 2024 14:24:52 +0200 Subject: [PATCH 01/96] crypto: xor - fix template benchmarking Commit c055e3eae0f1 ("crypto: xor - use ktime for template benchmarking") switched from using jiffies to ktime-based performance benchmarking. This works nicely on machines which have a fine-grained ktime() clocksource as e.g. x86 machines with TSC. But other machines, e.g. my 4-way HP PARISC server, don't have such fine-grained clocksources, which is why it seems that 800 xor loops take zero seconds, which then shows up in the logs as: xor: measuring software checksum speed 8regs : -1018167296 MB/sec 8regs_prefetch : -1018167296 MB/sec 32regs : -1018167296 MB/sec 32regs_prefetch : -1018167296 MB/sec Fix this with some small modifications to the existing code to improve the algorithm to always produce correct results without introducing major delays for architectures with a fine-grained ktime() clocksource: a) Delay start of the timing until ktime() just advanced. On machines with a fast ktime() this should be just one additional ktime() call. b) Count the number of loops. Run at minimum 800 loops and finish earliest when the ktime() counter has progressed. With that the throughput can now be calculated more accurately under all conditions. 
Fixes: c055e3eae0f1 ("crypto: xor - use ktime for template benchmarking") Signed-off-by: Helge Deller Tested-by: John David Anglin v2: - clean up coding style (noticed & suggested by Herbert Xu) - rephrased & fixed typo in commit message Signed-off-by: Herbert Xu --- crypto/xor.c | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/crypto/xor.c b/crypto/xor.c index a1363162978c..f39621a57bb3 100644 --- a/crypto/xor.c +++ b/crypto/xor.c @@ -83,33 +83,30 @@ static void __init do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2) { int speed; - int i, j; - ktime_t min, start, diff; + unsigned long reps; + ktime_t min, start, t0; tmpl->next = template_list; template_list = tmpl; preempt_disable(); - min = (ktime_t)S64_MAX; - for (i = 0; i < 3; i++) { - start = ktime_get(); - for (j = 0; j < REPS; j++) { - mb(); /* prevent loop optimization */ - tmpl->do_2(BENCH_SIZE, b1, b2); - mb(); - } - diff = ktime_sub(ktime_get(), start); - if (diff < min) - min = diff; - } + reps = 0; + t0 = ktime_get(); + /* delay start until time has advanced */ + while ((start = ktime_get()) == t0) + cpu_relax(); + do { + mb(); /* prevent loop optimization */ + tmpl->do_2(BENCH_SIZE, b1, b2); + mb(); + } while (reps++ < REPS || (t0 = ktime_get()) == start); + min = ktime_sub(t0, start); preempt_enable(); // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s] - if (!min) - min = 1; - speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); + speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); tmpl->speed = speed; pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed); From 47d96252099a7184b4bad852fcfa3c233c1d2f71 Mon Sep 17 00:00:00 2001 From: Jia He Date: Mon, 15 Jul 2024 07:20:23 +0000 Subject: [PATCH 02/96] crypto: arm64/poly1305 - move data to rodata section When objtool gains support for ARM in the future, it may encounter issues disassembling the following data in the .text section: > .Lzeros: > 
.long 0,0,0,0,0,0,0,0 > .asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm" > .align 2 Move it to .rodata which is a more appropriate section for read-only data. Signed-off-by: Jia He Signed-off-by: Herbert Xu --- arch/arm64/crypto/poly1305-armv8.pl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/crypto/poly1305-armv8.pl index cbc980fb02e3..bcb70c9541e9 100644 --- a/arch/arm64/crypto/poly1305-armv8.pl +++ b/arch/arm64/crypto/poly1305-armv8.pl @@ -885,10 +885,13 @@ poly1305_blocks_neon: ret .size poly1305_blocks_neon,.-poly1305_blocks_neon +.pushsection .rodata .align 5 .Lzeros: .long 0,0,0,0,0,0,0,0 .asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm" +.popsection + .align 2 #if !defined(__KERNEL__) && !defined(_WIN64) .comm OPENSSL_armcap_P,4,4 From 4e190a5740aedc37654335089e7923bc8109dc3a Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Wed, 17 Jul 2024 07:44:56 -0400 Subject: [PATCH 03/96] crypto: qat - preserve ADF_GENERAL_SEC The ADF_GENERAL_SEC configuration section contains values that must be preserved during state transitions (down -> up, up -> down). This patch modifies the logic in adf_dev_shutdown() to maintain all key values within this section, rather than selectively saving and restoring only the ADF_SERVICES_ENABLED attribute. To achieve this, a new function has been introduced that deletes all configuration sections except for the one specified by name. This function is invoked during adf_dev_down(), with ADF_GENERAL_SEC as the argument. Consequently, the adf_dev_shutdown_cache_cfg() function has been removed as it is now redundant. Additionally, this patch eliminates the cache_config parameter from the adf_dev_down() function since ADF_GENERAL_SEC should always be retained. This change does not cause any side effects because all entries in the key-value store are cleared when a module is unloaded. 
Signed-off-by: Adam Guerin Co-developed-by: Michal Witwicki Signed-off-by: Michal Witwicki Reviewed-by: Giovanni Cabiddu Reviewed-by: Przemek Kitszel Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_420xx/adf_drv.c | 4 +- drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 4 +- drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c | 4 +- .../crypto/intel/qat/qat_c3xxxvf/adf_drv.c | 4 +- drivers/crypto/intel/qat/qat_c62x/adf_drv.c | 4 +- drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c | 4 +- drivers/crypto/intel/qat/qat_common/adf_aer.c | 2 +- drivers/crypto/intel/qat/qat_common/adf_cfg.c | 29 ++++++++++++++ drivers/crypto/intel/qat/qat_common/adf_cfg.h | 2 + .../intel/qat/qat_common/adf_common_drv.h | 2 +- .../crypto/intel/qat/qat_common/adf_ctl_drv.c | 6 +-- .../crypto/intel/qat/qat_common/adf_init.c | 40 ++----------------- .../crypto/intel/qat/qat_common/adf_sriov.c | 2 +- .../crypto/intel/qat/qat_common/adf_sysfs.c | 4 +- .../crypto/intel/qat/qat_common/adf_vf_isr.c | 2 +- .../crypto/intel/qat/qat_dh895xcc/adf_drv.c | 4 +- .../crypto/intel/qat/qat_dh895xccvf/adf_drv.c | 4 +- 17 files changed, 60 insertions(+), 61 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c index 2a3598409eeb..f49818a13013 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -163,7 +163,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; @@ -177,7 +177,7 @@ static void adf_remove(struct pci_dev *pdev) pr_err("QAT: Driver removal failed\n"); return; } - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index d26564cebdec..659905e45950 100644 --- 
a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -165,7 +165,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; @@ -179,7 +179,7 @@ static void adf_remove(struct pci_dev *pdev) pr_err("QAT: Driver removal failed\n"); return; } - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); } diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index 956a4c85609a..4d18057745d4 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev) pr_err("QAT: Driver removal failed\n"); return; } - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c index a8de9cd09c05..f0023cfb234c 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c @@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev) return; } adf_flush_vf_wq(accel_dev); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); 
adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index ad0ca4384998..e6b5de55434e 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev) pr_err("QAT: Driver removal failed\n"); return; } - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c index 53b8ddb63364..2bd5b0ff00e3 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c @@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev) return; } adf_flush_vf_wq(accel_dev); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 04260f61d042..ec7913ab00a2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -44,7 +44,7 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, adf_pf2vf_notify_restarting(accel_dev); 
adf_pf2vf_wait_for_restarting_complete(accel_dev); pci_clear_master(pdev); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); return PCI_ERS_RESULT_NEED_RESET; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c index 2cf102ad4ca8..b0fc453fa3fb 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c @@ -100,6 +100,8 @@ void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev) } static void adf_cfg_section_del_all(struct list_head *head); +static void adf_cfg_section_del_all_except(struct list_head *head, + const char *section_name); void adf_cfg_del_all(struct adf_accel_dev *accel_dev) { @@ -111,6 +113,17 @@ void adf_cfg_del_all(struct adf_accel_dev *accel_dev) clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); } +void adf_cfg_del_all_except(struct adf_accel_dev *accel_dev, + const char *section_name) +{ + struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; + + down_write(&dev_cfg_data->lock); + adf_cfg_section_del_all_except(&dev_cfg_data->sec_list, section_name); + up_write(&dev_cfg_data->lock); + clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); +} + /** * adf_cfg_dev_remove() - Clears acceleration device configuration table. * @accel_dev: Pointer to acceleration device. 
@@ -185,6 +198,22 @@ static void adf_cfg_section_del_all(struct list_head *head) } } +static void adf_cfg_section_del_all_except(struct list_head *head, + const char *section_name) +{ + struct list_head *list, *tmp; + struct adf_cfg_section *ptr; + + list_for_each_prev_safe(list, tmp, head) { + ptr = list_entry(list, struct adf_cfg_section, list); + if (!strcmp(ptr->name, section_name)) + continue; + adf_cfg_keyval_del_all(&ptr->param_head); + list_del(list); + kfree(ptr); + } +} + static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s, const char *key) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.h b/drivers/crypto/intel/qat/qat_common/adf_cfg.h index c0c9052b2213..2afa6f0d15c5 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.h @@ -35,6 +35,8 @@ void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev); void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev); int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name); void adf_cfg_del_all(struct adf_accel_dev *accel_dev); +void adf_cfg_del_all_except(struct adf_accel_dev *accel_dev, + const char *section_name); int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, const char *section_name, const char *key, const void *val, diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 3bec9e20bad0..f7ecabdf7805 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -56,7 +56,7 @@ int adf_service_register(struct service_hndl *service); int adf_service_unregister(struct service_hndl *service); int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config); -int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config); +int adf_dev_down(struct adf_accel_dev *accel_dev); int adf_dev_restart(struct adf_accel_dev *accel_dev); void 
adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c index 26a1662fafbb..70fa0f6497a9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c @@ -247,7 +247,7 @@ static void adf_ctl_stop_devices(u32 id) if (!accel_dev->is_vf) continue; - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); } } @@ -256,7 +256,7 @@ static void adf_ctl_stop_devices(u32 id) if (!adf_dev_started(accel_dev)) continue; - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); } } } @@ -319,7 +319,7 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, if (ret) { dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n", ctl_data->device_id); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); } out: kfree(ctl_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 74f0818c0703..593fe9abe88c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -393,9 +393,9 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); } - /* Delete configuration only if not restarting */ + /* If not restarting, delete all cfg sections except for GENERAL */ if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) - adf_cfg_del_all(accel_dev); + adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC); if (hw_data->exit_arb) hw_data->exit_arb(accel_dev); @@ -445,33 +445,7 @@ void adf_error_notifier(struct adf_accel_dev *accel_dev) } } -static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev) -{ - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - int ret; - - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - - adf_dev_stop(accel_dev); - 
adf_dev_shutdown(accel_dev); - - if (!ret) { - ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); - if (ret) - return ret; - - ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, - services, ADF_STR); - if (ret) - return ret; - } - - return 0; -} - -int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig) +int adf_dev_down(struct adf_accel_dev *accel_dev) { int ret = 0; @@ -480,15 +454,9 @@ int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig) mutex_lock(&accel_dev->state_lock); - if (reconfig) { - ret = adf_dev_shutdown_cache_cfg(accel_dev); - goto out; - } - adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); -out: mutex_unlock(&accel_dev->state_lock); return ret; } @@ -535,7 +503,7 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev) if (!accel_dev) return -EFAULT; - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); ret = adf_dev_up(accel_dev, false); /* if device is already up return success*/ diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index 8d645e7e04aa..baf2e1cc1121 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -192,7 +192,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs) return -EBUSY; } - ret = adf_dev_down(accel_dev, true); + ret = adf_dev_down(accel_dev); if (ret) return ret; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 4e7f70d4049d..4fcd61ff70d1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -62,7 +62,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, break; } - ret = adf_dev_down(accel_dev, true); + ret = adf_dev_down(accel_dev); if (ret) return ret; @@ -76,7 +76,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, } else if (ret) 
{ dev_err(dev, "Failed to start device qat_dev%d\n", accel_id); - adf_dev_down(accel_dev, true); + adf_dev_down(accel_dev); return ret; } break; diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c index cdbb2d687b1b..783ee8c0fc14 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c @@ -71,7 +71,7 @@ static void adf_dev_stop_async(struct work_struct *work) struct adf_accel_dev *accel_dev = stop_data->accel_dev; adf_dev_restarting_notify(accel_dev); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); /* Re-enable PF2VF interrupts */ adf_enable_pf2vf_interrupts(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 40b456b8035b..2a50cce41515 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev) pr_err("QAT: Driver removal failed\n"); return; } - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c index d59cb1ba2ad5..7cb015b55122 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c @@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: 
pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev) return; } adf_flush_vf_wq(accel_dev); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); From b6c7d36292d50627dbe6a57fa344f87c776971e6 Mon Sep 17 00:00:00 2001 From: Michal Witwicki Date: Wed, 17 Jul 2024 07:44:57 -0400 Subject: [PATCH 04/96] crypto: qat - disable IOV in adf_dev_stop() Disabling IOV has the side effect of re-enabling the AEs that might attempt to do DMAs into the heartbeat buffers. Move the disable_iov() function in adf_dev_stop() before the AEs are stopped. Fixes: ed8ccaef52fa ("crypto: qat - Add support for SRIOV") Signed-off-by: Michal Witwicki Reviewed-by: Giovanni Cabiddu Reviewed-by: Przemek Kitszel Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 593fe9abe88c..f189cce7d153 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -323,6 +323,8 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) if (hw_data->stop_timer) hw_data->stop_timer(accel_dev); + hw_data->disable_iov(accel_dev); + if (wait) msleep(100); @@ -386,8 +388,6 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) adf_tl_shutdown(accel_dev); - hw_data->disable_iov(accel_dev); - if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { hw_data->free_irq(accel_dev); clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); From 6f1b5236348fced7e7691a933327694b4106bc39 Mon Sep 17 00:00:00 2001 From: Michal Witwicki Date: Wed, 17 Jul 2024 07:44:58 -0400 Subject: [PATCH 05/96] crypto: qat - fix recovery flow for VFs When the PFVF protocol was updated to support version 5, i.e. 
ADF_PFVF_COMPAT_FALLBACK, the compatibility version for the VF was updated without supporting the message RESTARTING_COMPLETE required for such version. Add support for the ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE message in the VF drivers. This message is sent by the VF driver to the PF to notify the completion of the shutdown flow. Fixes: ec26f8e6c784 ("crypto: qat - update PFVF protocol for recovery") Signed-off-by: Michal Witwicki Reviewed-by: Giovanni Cabiddu Reviewed-by: Przemek Kitszel Signed-off-by: Herbert Xu --- .../crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c | 14 ++++++++++++++ .../crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h | 1 + drivers/crypto/intel/qat/qat_common/adf_vf_isr.c | 2 ++ 3 files changed, 17 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c index 1141258db4b6..10c91e56d6be 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c @@ -48,6 +48,20 @@ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown); +void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE }; + + /* Check compatibility version */ + if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_FALLBACK) + return; + + if (adf_send_vf2pf_msg(accel_dev, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send Restarting complete event to PF\n"); +} +EXPORT_SYMBOL_GPL(adf_vf2pf_notify_restart_complete); + int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) { u8 pf_version; diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h index 71bc0e3f1d93..d79340ab3134 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h @@ -6,6 +6,7 @@ #if 
defined(CONFIG_PCI_IOV) int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev); void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev); +void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev); int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev); int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev); int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c index 783ee8c0fc14..a4636ec9f9ca 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c @@ -13,6 +13,7 @@ #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "adf_cfg_common.h" +#include "adf_pfvf_vf_msg.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" @@ -75,6 +76,7 @@ static void adf_dev_stop_async(struct work_struct *work) /* Re-enable PF2VF interrupts */ adf_enable_pf2vf_interrupts(accel_dev); + adf_vf2pf_notify_restart_complete(accel_dev); kfree(stop_data); } From cd8d2d74292c199b433ef77762bb1d28a4821784 Mon Sep 17 00:00:00 2001 From: Michal Witwicki Date: Wed, 17 Jul 2024 07:44:59 -0400 Subject: [PATCH 06/96] crypto: qat - ensure correct order in VF restarting handler In the process of sending the ADF_PF2VF_MSGTYPE_RESTARTING message to Virtual Functions (VFs), the Physical Function (PF) should set the `vf->restarting` flag to true before dispatching the message. This change is necessary to prevent a race condition where the handling of the ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE message (which sets the `vf->restarting` flag to false) runs immediately after the message is sent, but before the flag is set to true. Set the `vf->restarting` to true before sending the message ADF_PF2VF_MSGTYPE_RESTARTING, if supported by the version of the protocol and if the VF is started. 
Fixes: ec26f8e6c784 ("crypto: qat - update PFVF protocol for recovery") Signed-off-by: Michal Witwicki Reviewed-by: Giovanni Cabiddu Reviewed-by: Przemek Kitszel Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c index 0e31f4b41844..0cee3b23dee9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c @@ -18,14 +18,17 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n"); for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { - vf->restarting = false; + if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK) + vf->restarting = true; + else + vf->restarting = false; + if (!vf->init) continue; + if (adf_send_pf2vf_msg(accel_dev, i, msg)) dev_err(&GET_DEV(accel_dev), "Failed to send restarting msg to VF%d\n", i); - else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK) - vf->restarting = true; } } From ca88a2bdd4dd371e9f248d12528b1daf10db8648 Mon Sep 17 00:00:00 2001 From: Michal Witwicki Date: Wed, 17 Jul 2024 07:45:00 -0400 Subject: [PATCH 07/96] crypto: qat - allow disabling SR-IOV VFs The QAT driver allows enabling SR-IOV VFs but does not allow them to be disabled through a write to sysfs. Disabling SR-IOV VFs can be only achieved by bringing down and up a device using the attribute /sys/bus/pci/devices//qat/state. The documentation for the sysfs attribute `sriov_numvfs` specifies that "a userspace application wanting to disable the VFs would write a zero to this file". Add support for disabling SR-IOV VFs by writing '0' to the 'sriov_numvfs' attribute in sysfs. Enabling or disabling SR-IOV always requires adf_dev_down() to be called. 
This action subsequently leads to the deletion of the ADF_KERNEL_SEC configuration section. The keys ADF_NUM_CY and ADF_NUM_DC within that section must be set to '0', otherwise, the driver will register into the Linux Crypto Framework. Because of this, the configuration in the ADF_KERNEL_SEC section must be added before every sriov_enable. Signed-off-by: Michal Witwicki Reviewed-by: Giovanni Cabiddu Reviewed-by: Przemek Kitszel Signed-off-by: Herbert Xu --- .../crypto/intel/qat/qat_common/adf_sriov.c | 194 ++++++++++++------ 1 file changed, 128 insertions(+), 66 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index baf2e1cc1121..c75d0b6cb0ad 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -86,11 +86,133 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) return pci_enable_sriov(pdev, totalvfs); } +static int adf_add_sriov_configuration(struct adf_accel_dev *accel_dev) +{ + unsigned long val = 0; + int ret; + + ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); + if (ret) + return ret; + + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + return ret; + + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + return ret; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + + return ret; +} + +static int adf_do_disable_sriov(struct adf_accel_dev *accel_dev) +{ + int ret; + + if (adf_dev_in_use(accel_dev)) { + dev_err(&GET_DEV(accel_dev), + "Cannot disable SR-IOV, device in use\n"); + return -EBUSY; + } + + if (adf_dev_started(accel_dev)) { + if (adf_devmgr_in_reset(accel_dev)) { + dev_err(&GET_DEV(accel_dev), + "Cannot disable SR-IOV, device in reset\n"); + return -EBUSY; + } + + ret = adf_dev_down(accel_dev); + if (ret) + goto err_del_cfg; + } + + adf_disable_sriov(accel_dev); + + ret = 
adf_dev_up(accel_dev, true); + if (ret) + goto err_del_cfg; + + return 0; + +err_del_cfg: + adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC); + return ret; +} + +static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + int totalvfs = pci_sriov_get_totalvfs(pdev); + unsigned long val; + int ret; + + if (!device_iommu_mapped(&GET_DEV(accel_dev))) { + dev_warn(&GET_DEV(accel_dev), + "IOMMU should be enabled for SR-IOV to work correctly\n"); + return -EINVAL; + } + + if (adf_dev_started(accel_dev)) { + if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Device busy\n"); + return -EBUSY; + } + + ret = adf_dev_down(accel_dev); + if (ret) + return ret; + } + + ret = adf_add_sriov_configuration(accel_dev); + if (ret) + goto err_del_cfg; + + /* Allocate memory for VF info structs */ + accel_dev->pf.vf_info = kcalloc(totalvfs, sizeof(struct adf_accel_vf_info), + GFP_KERNEL); + ret = -ENOMEM; + if (!accel_dev->pf.vf_info) + goto err_del_cfg; + + ret = adf_dev_up(accel_dev, false); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n", + accel_dev->accel_id); + goto err_free_vf_info; + } + + ret = adf_enable_sriov(accel_dev); + if (ret) + goto err_free_vf_info; + + val = 1; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED, + &val, ADF_DEC); + if (ret) + goto err_free_vf_info; + + return totalvfs; + +err_free_vf_info: + adf_dev_down(accel_dev); + kfree(accel_dev->pf.vf_info); + accel_dev->pf.vf_info = NULL; + return ret; +err_del_cfg: + adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC); + return ret; +} + void adf_reenable_sriov(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - unsigned long val = 0; if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED, cfg)) @@ -99,15 +221,9 @@ void 
adf_reenable_sriov(struct adf_accel_dev *accel_dev) if (!accel_dev->pf.vf_info) return; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC)) + if (adf_add_sriov_configuration(accel_dev)) return; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC)) - return; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); dev_dbg(&pdev->dev, "Re-enabling SRIOV\n"); adf_enable_sriov(accel_dev); } @@ -168,70 +284,16 @@ EXPORT_SYMBOL_GPL(adf_disable_sriov); int adf_sriov_configure(struct pci_dev *pdev, int numvfs) { struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); - int totalvfs = pci_sriov_get_totalvfs(pdev); - unsigned long val; - int ret; if (!accel_dev) { dev_err(&pdev->dev, "Failed to find accel_dev\n"); return -EFAULT; } - if (!device_iommu_mapped(&pdev->dev)) - dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n"); - - if (accel_dev->pf.vf_info) { - dev_info(&pdev->dev, "Already enabled for this device\n"); - return -EINVAL; - } - - if (adf_dev_started(accel_dev)) { - if (adf_devmgr_in_reset(accel_dev) || - adf_dev_in_use(accel_dev)) { - dev_err(&GET_DEV(accel_dev), "Device busy\n"); - return -EBUSY; - } - - ret = adf_dev_down(accel_dev); - if (ret) - return ret; - } - - if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) - return -EFAULT; - val = 0; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - ADF_NUM_CY, (void *)&val, ADF_DEC)) - return -EFAULT; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - return ret; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - - /* Allocate memory for VF info structs */ - accel_dev->pf.vf_info = kcalloc(totalvfs, - sizeof(struct adf_accel_vf_info), - GFP_KERNEL); - if (!accel_dev->pf.vf_info) - return -ENOMEM; - - if (adf_dev_up(accel_dev, false)) { - dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n", - accel_dev->accel_id); - 
return -EFAULT; - } - - ret = adf_enable_sriov(accel_dev); - if (ret) - return ret; - - val = 1; - adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED, - &val, ADF_DEC); - - return numvfs; + if (numvfs) + return adf_do_enable_sriov(accel_dev); + else + return adf_do_disable_sriov(accel_dev); } EXPORT_SYMBOL_GPL(adf_sriov_configure); From 2fc990581c0988ad35b41a1b4eca840deb3297e2 Mon Sep 17 00:00:00 2001 From: Jeff Johnson Date: Thu, 18 Jul 2024 18:14:18 -0700 Subject: [PATCH 08/96] crypto: ppc/curve25519 - add missing MODULE_DESCRIPTION() macro Since commit 1fffe7a34c89 ("script: modpost: emit a warning when the description is missing"), a module without a MODULE_DESCRIPTION() will result in a warning with make W=1. The following warning is being observed when building ppc64le with CRYPTO_CURVE25519_PPC64=m: WARNING: modpost: missing MODULE_DESCRIPTION() in arch/powerpc/crypto/curve25519-ppc64le.o Add the missing invocation of the MODULE_DESCRIPTION() macro. Signed-off-by: Jeff Johnson Signed-off-by: Herbert Xu --- arch/powerpc/crypto/curve25519-ppc64le-core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/crypto/curve25519-ppc64le-core.c b/arch/powerpc/crypto/curve25519-ppc64le-core.c index 4e3e44ea4484..f7810be0b292 100644 --- a/arch/powerpc/crypto/curve25519-ppc64le-core.c +++ b/arch/powerpc/crypto/curve25519-ppc64le-core.c @@ -295,5 +295,6 @@ module_exit(curve25519_mod_exit); MODULE_ALIAS_CRYPTO("curve25519"); MODULE_ALIAS_CRYPTO("curve25519-ppc64le"); +MODULE_DESCRIPTION("PPC64le Curve25519 scalar multiplication with 51 bits limbs"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Danny Tsen "); From e0d3b845a1b10b7b5abdad7ecc69d45b2aab3209 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 24 Jul 2024 11:09:43 -0500 Subject: [PATCH 09/96] crypto: iaa - Fix potential use after free bug The free_device_compression_mode(iaa_device, device_mode) function frees "device_mode" but it is passed to iaa_compression_modes[i]->free() a
few lines later resulting in a use after free. The good news is that, so far as I can tell, nothing implements the ->free() function and the use after free happens in dead code. But, with this fix, when something does implement it, we'll be ready. :) Fixes: b190447e0fa3 ("crypto: iaa - Add compression mode management along with fixed mode") Signed-off-by: Dan Carpenter Reviewed-by: Tom Zanussi Signed-off-by: Herbert Xu --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index e810d286ee8c..237f87000070 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -495,10 +495,10 @@ static void remove_device_compression_modes(struct iaa_device *iaa_device) if (!device_mode) continue; - free_device_compression_mode(iaa_device, device_mode); - iaa_device->compression_modes[i] = NULL; if (iaa_compression_modes[i]->free) iaa_compression_modes[i]->free(device_mode); + free_device_compression_mode(iaa_device, device_mode); + iaa_device->compression_modes[i] = NULL; } } From b0cd6f4c3f1963439e9f26363c2bd40a05239f0a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 6 Aug 2024 13:45:59 +0800 Subject: [PATCH 10/96] Revert "crypto: arm64/poly1305 - move data to rodata section" This reverts commit 47d96252099a7184b4bad852fcfa3c233c1d2f71. It causes build issues as detected by the kernel test robot. 
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202408040817.OWKXtCv6-lkp@intel.com/ Signed-off-by: Herbert Xu --- arch/arm64/crypto/poly1305-armv8.pl | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/crypto/poly1305-armv8.pl index bcb70c9541e9..cbc980fb02e3 100644 --- a/arch/arm64/crypto/poly1305-armv8.pl +++ b/arch/arm64/crypto/poly1305-armv8.pl @@ -885,13 +885,10 @@ poly1305_blocks_neon: ret .size poly1305_blocks_neon,.-poly1305_blocks_neon -.pushsection .rodata .align 5 .Lzeros: .long 0,0,0,0,0,0,0,0 .asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm" -.popsection - .align 2 #if !defined(__KERNEL__) && !defined(_WIN64) .comm OPENSSL_armcap_P,4,4 From c8981d9230d808e62c65349d0b255c7f4b9087d6 Mon Sep 17 00:00:00 2001 From: Pavitrakumar M Date: Mon, 29 Jul 2024 09:43:45 +0530 Subject: [PATCH 11/96] crypto: spacc - Add SPAcc Skcipher support Signed-off-by: Bhoomika K Signed-off-by: Pavitrakumar M Acked-by: Ruud Derwig Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_core.c | 1129 ++++++++++++++++++++ drivers/crypto/dwc-spacc/spacc_core.h | 826 ++++++++++++++ drivers/crypto/dwc-spacc/spacc_device.c | 338 ++++++ drivers/crypto/dwc-spacc/spacc_device.h | 231 ++++ drivers/crypto/dwc-spacc/spacc_hal.c | 367 +++++++ drivers/crypto/dwc-spacc/spacc_hal.h | 114 ++ drivers/crypto/dwc-spacc/spacc_interrupt.c | 316 ++++++ drivers/crypto/dwc-spacc/spacc_manager.c | 650 +++++++++++ drivers/crypto/dwc-spacc/spacc_skcipher.c | 712 ++++++++++++ 9 files changed, 4683 insertions(+) create mode 100644 drivers/crypto/dwc-spacc/spacc_core.c create mode 100644 drivers/crypto/dwc-spacc/spacc_core.h create mode 100644 drivers/crypto/dwc-spacc/spacc_device.c create mode 100644 drivers/crypto/dwc-spacc/spacc_device.h create mode 100644 drivers/crypto/dwc-spacc/spacc_hal.c create mode 100644 drivers/crypto/dwc-spacc/spacc_hal.h create mode 100644 drivers/crypto/dwc-spacc/spacc_interrupt.c create mode 
100644 drivers/crypto/dwc-spacc/spacc_manager.c create mode 100644 drivers/crypto/dwc-spacc/spacc_skcipher.c diff --git a/drivers/crypto/dwc-spacc/spacc_core.c b/drivers/crypto/dwc-spacc/spacc_core.c new file mode 100644 index 000000000000..2bad071efd9b --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_core.c @@ -0,0 +1,1129 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include "spacc_hal.h" +#include "spacc_core.h" + +static const u8 spacc_ctrl_map[SPACC_CTRL_VER_SIZE][SPACC_CTRL_MAPSIZE] = { + { 0, 8, 4, 12, 24, 16, 31, 25, 26, 27, 28, 29, 14, 15 }, + { 0, 8, 3, 12, 24, 16, 31, 25, 26, 27, 28, 29, 14, 15 }, + { 0, 4, 8, 13, 15, 16, 24, 25, 26, 27, 28, 29, 30, 31 } +}; + +static const int keysizes[2][7] = { + /* 1 2 4 8 16 32 64 */ + { 5, 8, 16, 24, 32, 0, 0 }, /* cipher key sizes*/ + { 8, 16, 20, 24, 32, 64, 128 }, /* hash key sizes*/ +}; + + +/* bits are 40, 64, 128, 192, 256, and top bit for hash */ +static const unsigned char template[] = { + [CRYPTO_MODE_NULL] = 0, + [CRYPTO_MODE_AES_ECB] = 28, /* AESECB 128/224/256 */ + [CRYPTO_MODE_AES_CBC] = 28, /* AESCBC 128/224/256 */ + [CRYPTO_MODE_AES_CTR] = 28, /* AESCTR 128/224/256 */ + [CRYPTO_MODE_AES_CCM] = 28, /* AESCCM 128/224/256 */ + [CRYPTO_MODE_AES_GCM] = 28, /* AESGCM 128/224/256 */ + [CRYPTO_MODE_AES_F8] = 28, /* AESF8 128/224/256 */ + [CRYPTO_MODE_AES_XTS] = 20, /* AESXTS 128/256 */ + [CRYPTO_MODE_AES_CFB] = 28, /* AESCFB 128/224/256 */ + [CRYPTO_MODE_AES_OFB] = 28, /* AESOFB 128/224/256 */ + [CRYPTO_MODE_AES_CS1] = 28, /* AESCS1 128/224/256 */ + [CRYPTO_MODE_AES_CS2] = 28, /* AESCS2 128/224/256 */ + [CRYPTO_MODE_AES_CS3] = 28, /* AESCS3 128/224/256 */ + [CRYPTO_MODE_MULTI2_ECB] = 0, /* MULTI2 */ + [CRYPTO_MODE_MULTI2_CBC] = 0, /* MULTI2 */ + [CRYPTO_MODE_MULTI2_OFB] = 0, /* MULTI2 */ + [CRYPTO_MODE_MULTI2_CFB] = 0, /* MULTI2 */ + [CRYPTO_MODE_3DES_CBC] = 8, /* 3DES CBC */ + [CRYPTO_MODE_3DES_ECB] = 8, /* 3DES ECB */ + [CRYPTO_MODE_DES_CBC] = 2, /* DES CBC */ + 
[CRYPTO_MODE_DES_ECB] = 2, /* DES ECB */ + [CRYPTO_MODE_KASUMI_ECB] = 4, /* KASUMI ECB */ + [CRYPTO_MODE_KASUMI_F8] = 4, /* KASUMI F8 */ + [CRYPTO_MODE_SNOW3G_UEA2] = 4, /* SNOW3G */ + [CRYPTO_MODE_ZUC_UEA3] = 4, /* ZUC */ + [CRYPTO_MODE_CHACHA20_STREAM] = 16, /* CHACHA20 */ + [CRYPTO_MODE_CHACHA20_POLY1305] = 16, /* CHACHA20 */ + [CRYPTO_MODE_SM4_ECB] = 4, /* SM4ECB 128 */ + [CRYPTO_MODE_SM4_CBC] = 4, /* SM4CBC 128 */ + [CRYPTO_MODE_SM4_CFB] = 4, /* SM4CFB 128 */ + [CRYPTO_MODE_SM4_OFB] = 4, /* SM4OFB 128 */ + [CRYPTO_MODE_SM4_CTR] = 4, /* SM4CTR 128 */ + [CRYPTO_MODE_SM4_CCM] = 4, /* SM4CCM 128 */ + [CRYPTO_MODE_SM4_GCM] = 4, /* SM4GCM 128 */ + [CRYPTO_MODE_SM4_F8] = 4, /* SM4F8 128 */ + [CRYPTO_MODE_SM4_XTS] = 4, /* SM4XTS 128 */ + [CRYPTO_MODE_SM4_CS1] = 4, /* SM4CS1 128 */ + [CRYPTO_MODE_SM4_CS2] = 4, /* SM4CS2 128 */ + [CRYPTO_MODE_SM4_CS3] = 4, /* SM4CS3 128 */ + + [CRYPTO_MODE_HASH_MD5] = 242, + [CRYPTO_MODE_HMAC_MD5] = 242, + [CRYPTO_MODE_HASH_SHA1] = 242, + [CRYPTO_MODE_HMAC_SHA1] = 242, + [CRYPTO_MODE_HASH_SHA224] = 242, + [CRYPTO_MODE_HMAC_SHA224] = 242, + [CRYPTO_MODE_HASH_SHA256] = 242, + [CRYPTO_MODE_HMAC_SHA256] = 242, + [CRYPTO_MODE_HASH_SHA384] = 242, + [CRYPTO_MODE_HMAC_SHA384] = 242, + [CRYPTO_MODE_HASH_SHA512] = 242, + [CRYPTO_MODE_HMAC_SHA512] = 242, + [CRYPTO_MODE_HASH_SHA512_224] = 242, + [CRYPTO_MODE_HMAC_SHA512_224] = 242, + [CRYPTO_MODE_HASH_SHA512_256] = 242, + [CRYPTO_MODE_HMAC_SHA512_256] = 242, + [CRYPTO_MODE_MAC_XCBC] = 154, /* XaCBC */ + [CRYPTO_MODE_MAC_CMAC] = 154, /* CMAC */ + [CRYPTO_MODE_MAC_KASUMI_F9] = 130, /* KASUMI */ + [CRYPTO_MODE_MAC_SNOW3G_UIA2] = 130, /* SNOW */ + [CRYPTO_MODE_MAC_ZUC_UIA3] = 130, /* ZUC */ + [CRYPTO_MODE_MAC_POLY1305] = 144, + [CRYPTO_MODE_SSLMAC_MD5] = 130, + [CRYPTO_MODE_SSLMAC_SHA1] = 132, + [CRYPTO_MODE_HASH_CRC32] = 0, + [CRYPTO_MODE_MAC_MICHAEL] = 129, + + [CRYPTO_MODE_HASH_SHA3_224] = 242, + [CRYPTO_MODE_HASH_SHA3_256] = 242, + [CRYPTO_MODE_HASH_SHA3_384] = 242, + [CRYPTO_MODE_HASH_SHA3_512] = 
242, + [CRYPTO_MODE_HASH_SHAKE128] = 242, + [CRYPTO_MODE_HASH_SHAKE256] = 242, + [CRYPTO_MODE_HASH_CSHAKE128] = 130, + [CRYPTO_MODE_HASH_CSHAKE256] = 130, + [CRYPTO_MODE_MAC_KMAC128] = 242, + [CRYPTO_MODE_MAC_KMAC256] = 242, + [CRYPTO_MODE_MAC_KMACXOF128] = 242, + [CRYPTO_MODE_MAC_KMACXOF256] = 242, + [CRYPTO_MODE_HASH_SM3] = 242, + [CRYPTO_MODE_HMAC_SM3] = 242, + [CRYPTO_MODE_MAC_SM4_XCBC] = 242, + [CRYPTO_MODE_MAC_SM4_CMAC] = 242, +}; + +int spacc_sg_to_ddt(struct device *dev, struct scatterlist *sg, + int nbytes, struct pdu_ddt *ddt, int dma_direction) +{ + struct scatterlist *sg_entry, *sgl; + int nents, orig_nents; + int i, rc; + + orig_nents = sg_nents(sg); + if (orig_nents > 1) { + sgl = sg_last(sg, orig_nents); + if (sgl->length == 0) + orig_nents--; + } + nents = dma_map_sg(dev, sg, orig_nents, dma_direction); + + if (nents <= 0) + return -ENOMEM; + + /* require ATOMIC operations */ + rc = pdu_ddt_init(ddt, nents | 0x80000000); + if (rc < 0) { + dma_unmap_sg(dev, sg, nents, dma_direction); + return -EIO; + } + + for_each_sg(sg, sg_entry, nents, i) { + pdu_ddt_add(ddt, sg_dma_address(sg_entry), + sg_dma_len(sg_entry)); + } + + dma_sync_sg_for_device(dev, sg, nents, dma_direction); + + return nents; +} + +int spacc_set_operation(struct spacc_device *spacc, int handle, int op, + u32 prot, uint32_t icvcmd, uint32_t icvoff, + uint32_t icvsz, uint32_t sec_key) +{ + int ret = CRYPTO_OK; + struct spacc_job *job = NULL; + + if (handle < 0 || handle > SPACC_MAX_JOBS) + return -ENXIO; + + job = &spacc->job[handle]; + if (!job) + return -EIO; + + job->op = op; + if (op == OP_ENCRYPT) + job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ENCRYPT); + else + job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_ENCRYPT); + + switch (prot) { + case ICV_HASH: /* HASH of plaintext */ + job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ICV_PT); + break; + case ICV_HASH_ENCRYPT: + /* HASH the plaintext and encrypt the lot */ + /* ICV_PT and ICV_APPEND must be set too */ + job->ctrl |= 
SPACC_CTRL_MASK(SPACC_CTRL_ICV_ENC); + job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ICV_PT); + /* This mode is not valid when BIT_ALIGN != 0 */ + job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ICV_APPEND); + break; + case ICV_ENCRYPT_HASH: /* HASH the ciphertext */ + job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_ICV_PT); + job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_ICV_ENC); + break; + case ICV_IGNORE: + break; + default: + ret = -EINVAL; + } + + job->icv_len = icvsz; + + switch (icvcmd) { + case IP_ICV_OFFSET: + job->icv_offset = icvoff; + job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_ICV_APPEND); + break; + case IP_ICV_APPEND: + job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ICV_APPEND); + break; + case IP_ICV_IGNORE: + break; + default: + ret = -EINVAL; + } + + if (sec_key) + job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_SEC_KEY); + + return ret; +} + +static int _spacc_fifo_full(struct spacc_device *spacc, uint32_t prio) +{ + if (spacc->config.is_qos) + return readl(spacc->regmap + SPACC_REG_FIFO_STAT) & + SPACC_FIFO_STAT_CMDX_FULL(prio); + else + return readl(spacc->regmap + SPACC_REG_FIFO_STAT) & + SPACC_FIFO_STAT_CMD0_FULL; +} + +/* When proc_sz != 0 it overrides the ddt_len value + * defined in the context referenced by 'job_idx' + */ +int spacc_packet_enqueue_ddt_ex(struct spacc_device *spacc, int use_jb, + int job_idx, struct pdu_ddt *src_ddt, + struct pdu_ddt *dst_ddt, u32 proc_sz, + uint32_t aad_offset, uint32_t pre_aad_sz, + u32 post_aad_sz, uint32_t iv_offset, + uint32_t prio) +{ + int i; + struct spacc_job *job; + int ret = CRYPTO_OK, proc_len; + + if (job_idx < 0 || job_idx > SPACC_MAX_JOBS) + return -ENXIO; + + switch (prio) { + case SPACC_SW_CTRL_PRIO_MED: + if (spacc->config.cmd1_fifo_depth == 0) + return -EINVAL; + break; + case SPACC_SW_CTRL_PRIO_LOW: + if (spacc->config.cmd2_fifo_depth == 0) + return -EINVAL; + break; + } + + job = &spacc->job[job_idx]; + if (!job) + return -EIO; + + /* process any jobs in the jb*/ + if (use_jb && spacc_process_jb(spacc) != 0) + goto fifo_full; + 
+ if (_spacc_fifo_full(spacc, prio)) { + if (use_jb) + goto fifo_full; + else + return -EBUSY; + } + + /* compute the length we must process, in decrypt mode + * with an ICV (hash, hmac or CCM modes) + * we must subtract the icv length from the buffer size + */ + if (proc_sz == SPACC_AUTO_SIZE) { + proc_len = src_ddt->len; + + if (job->op == OP_DECRYPT && + (job->hash_mode > 0 || + job->enc_mode == CRYPTO_MODE_AES_CCM || + job->enc_mode == CRYPTO_MODE_AES_GCM) && + !(job->ctrl & SPACC_CTRL_MASK(SPACC_CTRL_ICV_ENC))) + proc_len = src_ddt->len - job->icv_len; + } else { + proc_len = proc_sz; + } + + if (pre_aad_sz & SPACC_AADCOPY_FLAG) { + job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_AAD_COPY); + pre_aad_sz &= ~(SPACC_AADCOPY_FLAG); + } else { + job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_AAD_COPY); + } + + job->pre_aad_sz = pre_aad_sz; + job->post_aad_sz = post_aad_sz; + + if (spacc->config.dma_type == SPACC_DMA_DDT) { + pdu_io_cached_write(spacc->regmap + SPACC_REG_SRC_PTR, + (uint32_t)src_ddt->phys, + &spacc->cache.src_ptr); + pdu_io_cached_write(spacc->regmap + SPACC_REG_DST_PTR, + (uint32_t)dst_ddt->phys, + &spacc->cache.dst_ptr); + } else if (spacc->config.dma_type == SPACC_DMA_LINEAR) { + pdu_io_cached_write(spacc->regmap + SPACC_REG_SRC_PTR, + (uint32_t)src_ddt->virt[0], + &spacc->cache.src_ptr); + pdu_io_cached_write(spacc->regmap + SPACC_REG_DST_PTR, + (uint32_t)dst_ddt->virt[0], + &spacc->cache.dst_ptr); + } else { + return -EIO; + } + + pdu_io_cached_write(spacc->regmap + SPACC_REG_PROC_LEN, + proc_len - job->post_aad_sz, + &spacc->cache.proc_len); + pdu_io_cached_write(spacc->regmap + SPACC_REG_ICV_LEN, + job->icv_len, &spacc->cache.icv_len); + pdu_io_cached_write(spacc->regmap + SPACC_REG_ICV_OFFSET, + job->icv_offset, &spacc->cache.icv_offset); + pdu_io_cached_write(spacc->regmap + SPACC_REG_PRE_AAD_LEN, + job->pre_aad_sz, &spacc->cache.pre_aad); + pdu_io_cached_write(spacc->regmap + SPACC_REG_POST_AAD_LEN, + job->post_aad_sz, &spacc->cache.post_aad); + 
pdu_io_cached_write(spacc->regmap + SPACC_REG_IV_OFFSET, + iv_offset, &spacc->cache.iv_offset); + pdu_io_cached_write(spacc->regmap + SPACC_REG_OFFSET, + aad_offset, &spacc->cache.offset); + pdu_io_cached_write(spacc->regmap + SPACC_REG_AUX_INFO, + AUX_DIR(job->auxinfo_dir) | + AUX_BIT_ALIGN(job->auxinfo_bit_align) | + AUX_CBC_CS(job->auxinfo_cs_mode), + &spacc->cache.aux); + + if (job->first_use == 1) { + writel(job->ckey_sz | SPACC_SET_KEY_CTX(job->ctx_idx), + spacc->regmap + SPACC_REG_KEY_SZ); + writel(job->hkey_sz | SPACC_SET_KEY_CTX(job->ctx_idx), + spacc->regmap + SPACC_REG_KEY_SZ); + } + + job->job_swid = spacc->job_next_swid; + spacc->job_lookup[job->job_swid] = job_idx; + spacc->job_next_swid = + (spacc->job_next_swid + 1) % SPACC_MAX_JOBS; + writel(SPACC_SW_CTRL_ID_SET(job->job_swid) | + SPACC_SW_CTRL_PRIO_SET(prio), + spacc->regmap + SPACC_REG_SW_CTRL); + writel(job->ctrl, spacc->regmap + SPACC_REG_CTRL); + + /* Clear an expansion key after the first call*/ + if (job->first_use == 1) { + job->first_use = 0; + job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_KEY_EXP); + } + + return ret; + +fifo_full: + /* try to add a job to the job buffers*/ + i = spacc->jb_head + 1; + if (i == SPACC_MAX_JOB_BUFFERS) + i = 0; + + if (i == spacc->jb_tail) + return -EBUSY; + + spacc->job_buffer[spacc->jb_head] = (struct spacc_job_buffer) { + .active = 1, + .job_idx = job_idx, + .src = src_ddt, + .dst = dst_ddt, + .proc_sz = proc_sz, + .aad_offset = aad_offset, + .pre_aad_sz = pre_aad_sz, + .post_aad_sz = post_aad_sz, + .iv_offset = iv_offset, + .prio = prio + }; + + spacc->jb_head = i; + + return CRYPTO_USED_JB; +} + +int spacc_packet_enqueue_ddt(struct spacc_device *spacc, int job_idx, + struct pdu_ddt *src_ddt, struct pdu_ddt *dst_ddt, + u32 proc_sz, u32 aad_offset, uint32_t pre_aad_sz, + uint32_t post_aad_sz, u32 iv_offset, uint32_t prio) +{ + int ret; + unsigned long lock_flags; + + spin_lock_irqsave(&spacc->lock, lock_flags); + ret = spacc_packet_enqueue_ddt_ex(spacc, 1, 
job_idx, src_ddt, + dst_ddt, proc_sz, aad_offset, + pre_aad_sz, post_aad_sz, + iv_offset, prio); + spin_unlock_irqrestore(&spacc->lock, lock_flags); + + return ret; +} + +static int spacc_packet_dequeue(struct spacc_device *spacc, int job_idx) +{ + int ret = CRYPTO_OK; + struct spacc_job *job = &spacc->job[job_idx]; + unsigned long lock_flag; + + spin_lock_irqsave(&spacc->lock, lock_flag); + + if (!job && !(job_idx == SPACC_JOB_IDX_UNUSED)) { + ret = -EIO; + } else if (job->job_done) { + job->job_done = 0; + ret = job->job_err; + } else { + ret = -EINPROGRESS; + } + + spin_unlock_irqrestore(&spacc->lock, lock_flag); + + return ret; +} + +int spacc_isenabled(struct spacc_device *spacc, int mode, int keysize) +{ + int x; + + if (mode < 0 || mode > CRYPTO_MODE_LAST) + return 0; + + if (mode == CRYPTO_MODE_NULL || + mode == CRYPTO_MODE_AES_XTS || + mode == CRYPTO_MODE_SM4_XTS || + mode == CRYPTO_MODE_AES_F8 || + mode == CRYPTO_MODE_SM4_F8 || + spacc->config.modes[mode] & 128) + return 1; + + for (x = 0; x < 6; x++) { + if (keysizes[0][x] == keysize) { + if (spacc->config.modes[mode] & (1 << x)) + return 1; + else + return 0; + } + } + + return 0; +} + +/* Releases a crypto context back into appropriate module's pool*/ +int spacc_close(struct spacc_device *dev, int handle) +{ + return spacc_job_release(dev, handle); +} + +static void spacc_static_modes(struct spacc_device *spacc, int x, int y) +{ + /* Disable the algos that as not supported here */ + switch (x) { + case CRYPTO_MODE_AES_F8: + case CRYPTO_MODE_AES_CFB: + case CRYPTO_MODE_AES_OFB: + case CRYPTO_MODE_MULTI2_ECB: + case CRYPTO_MODE_MULTI2_CBC: + case CRYPTO_MODE_MULTI2_CFB: + case CRYPTO_MODE_MULTI2_OFB: + case CRYPTO_MODE_MAC_POLY1305: + case CRYPTO_MODE_HASH_CRC32: + /* Disable the modes */ + spacc->config.modes[x] &= ~(1 << y); + break; + default: + break;/* Algos are enabled */ + } +} + +int spacc_static_config(struct spacc_device *spacc) +{ + + int x, y; + + for (x = 0; x < ARRAY_SIZE(template); x++) { 
+ spacc->config.modes[x] = template[x]; + + for (y = 0; y < (ARRAY_SIZE(keysizes[0])); y++) { + /* List static modes */ + spacc_static_modes(spacc, x, y); + } + } + + return 0; +} + +int spacc_clone_handle(struct spacc_device *spacc, int old_handle, + void *cbdata) +{ + int new_handle; + + new_handle = spacc_job_request(spacc, spacc->job[old_handle].ctx_idx); + if (new_handle < 0) + return new_handle; + + spacc->job[new_handle] = spacc->job[old_handle]; + spacc->job[new_handle].job_used = new_handle; + spacc->job[new_handle].cbdata = cbdata; + + return new_handle; +} + +/* Allocates a job for spacc module context and initialize + * it with an appropriate type. + */ +int spacc_open(struct spacc_device *spacc, int enc, int hash, int ctxid, + int secure_mode, spacc_callback cb, void *cbdata) +{ + u32 ctrl = 0; + int job_idx = 0; + int ret = CRYPTO_OK; + struct spacc_job *job = NULL; + + job_idx = spacc_job_request(spacc, ctxid); + if (job_idx < 0) + return -EIO; + + job = &spacc->job[job_idx]; + + if (secure_mode && job->ctx_idx > spacc->config.num_sec_ctx) { + pr_debug("ERR: For secure contexts"); + pr_debug("ERR: Job ctx ID is outside allowed range\n"); + spacc_job_release(spacc, job_idx); + return -EIO; + } + + job->auxinfo_cs_mode = 0; + job->auxinfo_bit_align = 0; + job->auxinfo_dir = 0; + job->icv_len = 0; + + switch (enc) { + case CRYPTO_MODE_NULL: + break; + case CRYPTO_MODE_AES_ECB: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); + break; + case CRYPTO_MODE_AES_CBC: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); + break; + case CRYPTO_MODE_AES_CS3: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); + job->auxinfo_cs_mode = 3; + break; + case CRYPTO_MODE_AES_CTR: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CTR); + break; 
+ case CRYPTO_MODE_AES_XTS: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_XTS); + break; + case CRYPTO_MODE_3DES_CBC: + case CRYPTO_MODE_DES_CBC: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_DES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); + break; + case CRYPTO_MODE_3DES_ECB: + case CRYPTO_MODE_DES_ECB: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_DES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); + break; + case CRYPTO_MODE_CHACHA20_STREAM: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_CHACHA20); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CHACHA_STREAM); + break; + case CRYPTO_MODE_SM4_ECB: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); + break; + case CRYPTO_MODE_SM4_CBC: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); + break; + case CRYPTO_MODE_SM4_CS3: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); + job->auxinfo_cs_mode = 3; + break; + case CRYPTO_MODE_SM4_CTR: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CTR); + break; + case CRYPTO_MODE_SM4_XTS: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_XTS); + break; + default: + ret = -EOPNOTSUPP; + } + + ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_MSG_BEGIN) | + SPACC_CTRL_MASK(SPACC_CTRL_MSG_END); + + if (ret != CRYPTO_OK) { + spacc_job_release(spacc, job_idx); + } else { + ret = job_idx; + job->first_use = 1; + job->enc_mode = enc; + job->hash_mode = hash; + job->ckey_sz = 0; + job->hkey_sz = 0; + job->job_done = 0; + job->job_swid = 0; + job->job_secure = !!secure_mode; + + job->auxinfo_bit_align = 0; + job->job_err = -EINPROGRESS; + job->ctrl = ctrl | + SPACC_CTRL_SET(SPACC_CTRL_CTX_IDX, + job->ctx_idx); + job->cb = cb; + 
job->cbdata = cbdata; + } + + return ret; +} + +static int spacc_xof_stringsize_autodetect(struct spacc_device *spacc) +{ + void *virt; + dma_addr_t dma; + struct pdu_ddt ddt; + int ss, alg, i, stat; + unsigned long spacc_ctrl[2] = {0xF400B400, 0xF400D400}; + unsigned char buf[256]; + unsigned long buflen, rbuf; + unsigned char test_str[6] = {0x01, 0x20, 0x54, 0x45, 0x53, 0x54}; + unsigned char md[2][16] = { + {0xc3, 0x6d, 0x0a, 0x88, 0xfa, 0x37, 0x4c, 0x9b, + 0x44, 0x74, 0xeb, 0x00, 0x5f, 0xe8, 0xca, 0x25}, + {0x68, 0x77, 0x04, 0x11, 0xf8, 0xe3, 0xb0, 0x1e, + 0x0d, 0xbf, 0x71, 0x6a, 0xe9, 0x87, 0x1a, 0x0d}}; + + virt = dma_alloc_coherent(get_ddt_device(), 256, &dma, GFP_KERNEL); + if (!virt) + return -EIO; + + if (pdu_ddt_init(&ddt, 1)) { + dma_free_coherent(get_ddt_device(), 256, virt, dma); + return -EIO; + } + pdu_ddt_add(&ddt, dma, 256); + + /* populate registers for jobs*/ + writel((uint32_t)ddt.phys, spacc->regmap + SPACC_REG_SRC_PTR); + writel((uint32_t)ddt.phys, spacc->regmap + SPACC_REG_DST_PTR); + + writel(16, spacc->regmap + SPACC_REG_PROC_LEN); + writel(16, spacc->regmap + SPACC_REG_PRE_AAD_LEN); + writel(16, spacc->regmap + SPACC_REG_ICV_LEN); + writel(6, spacc->regmap + SPACC_REG_KEY_SZ); + writel(0, spacc->regmap + SPACC_REG_SW_CTRL); + + /* repeat for 2 algorithms, CSHAKE128 and KMAC128*/ + for (alg = 0; (alg < 2) && (spacc->config.string_size == 0); alg++) { + /* repeat for 4 string_size sizes*/ + for (ss = 0; ss < 4; ss++) { + buflen = (32UL << ss); + if (buflen > spacc->config.hash_page_size) + break; + + /* clear I/O memory*/ + memset(virt, 0, 256); + + /* clear buf and then insert test string*/ + memset(buf, 0, sizeof(buf)); + memcpy(buf, test_str, sizeof(test_str)); + memcpy(buf + (buflen >> 1), test_str, sizeof(test_str)); + + /* write key context */ + pdu_to_dev_s(spacc->regmap + SPACC_CTX_HASH_KEY, + buf, + spacc->config.hash_page_size >> 2, + spacc->config.spacc_endian); + + /* write ctrl register */ + writel(spacc_ctrl[alg], 
spacc->regmap + SPACC_REG_CTRL); + + /* wait for job to complete */ + for (i = 0; i < 20; i++) { + rbuf = 0; + rbuf = readl(spacc->regmap + + SPACC_REG_FIFO_STAT) & + SPACC_FIFO_STAT_STAT_EMPTY; + if (!rbuf) { + /* check result, if it matches, + * we have string_size + */ + writel(1, spacc->regmap + + SPACC_REG_STAT_POP); + rbuf = 0; + rbuf = readl(spacc->regmap + + SPACC_REG_STATUS); + stat = SPACC_GET_STATUS_RET_CODE(rbuf); + if ((!memcmp(virt, md[alg], 16)) && + stat == SPACC_OK) { + spacc->config.string_size = + (16 << ss); + } + break; + } + } + } + } + + /* reset registers */ + writel(0, spacc->regmap + SPACC_REG_IRQ_CTRL); + writel(0, spacc->regmap + SPACC_REG_IRQ_EN); + writel(0xFFFFFFFF, spacc->regmap + SPACC_REG_IRQ_STAT); + + writel(0, spacc->regmap + SPACC_REG_SRC_PTR); + writel(0, spacc->regmap + SPACC_REG_DST_PTR); + writel(0, spacc->regmap + SPACC_REG_PROC_LEN); + writel(0, spacc->regmap + SPACC_REG_ICV_LEN); + writel(0, spacc->regmap + SPACC_REG_PRE_AAD_LEN); + + pdu_ddt_free(&ddt); + dma_free_coherent(get_ddt_device(), 256, virt, dma); + + return CRYPTO_OK; +} + +/* free up the memory */ +void spacc_fini(struct spacc_device *spacc) +{ + vfree(spacc->ctx); + vfree(spacc->job); +} + +int spacc_init(void __iomem *baseaddr, struct spacc_device *spacc, + struct pdu_info *info) +{ + unsigned long id; + char version_string[3][16] = { "SPACC", "SPACC-PDU" }; + char idx_string[2][16] = { "(Normal Port)", "(Secure Port)" }; + char dma_type_string[4][16] = { "Unknown", "Scattergather", "Linear", + "Unknown" }; + + if (!baseaddr) { + pr_debug("ERR: baseaddr is NULL\n"); + return -1; + } + if (!spacc) { + pr_debug("ERR: spacc is NULL\n"); + return -1; + } + + memset(spacc, 0, sizeof(*spacc)); + spin_lock_init(&spacc->lock); + spin_lock_init(&spacc->ctx_lock); + + /* assign the baseaddr*/ + spacc->regmap = baseaddr; + + /* version info*/ + spacc->config.version = info->spacc_version.version; + spacc->config.pdu_version = (info->pdu_config.major << 4) | + 
info->pdu_config.minor; + spacc->config.project = info->spacc_version.project; + spacc->config.is_pdu = info->spacc_version.is_pdu; + spacc->config.is_qos = info->spacc_version.qos; + + /* misc*/ + spacc->config.is_partial = info->spacc_version.partial; + spacc->config.num_ctx = info->spacc_config.num_ctx; + spacc->config.ciph_page_size = 1U << + info->spacc_config.ciph_ctx_page_size; + + spacc->config.hash_page_size = 1U << + info->spacc_config.hash_ctx_page_size; + + spacc->config.dma_type = info->spacc_config.dma_type; + spacc->config.idx = info->spacc_version.vspacc_idx; + spacc->config.cmd0_fifo_depth = info->spacc_config.cmd0_fifo_depth; + spacc->config.cmd1_fifo_depth = info->spacc_config.cmd1_fifo_depth; + spacc->config.cmd2_fifo_depth = info->spacc_config.cmd2_fifo_depth; + spacc->config.stat_fifo_depth = info->spacc_config.stat_fifo_depth; + spacc->config.fifo_cnt = 1; + spacc->config.is_ivimport = info->spacc_version.ivimport; + + /* ctrl register map*/ + if (spacc->config.version <= 0x4E) + spacc->config.ctrl_map = spacc_ctrl_map[SPACC_CTRL_VER_0]; + else if (spacc->config.version <= 0x60) + spacc->config.ctrl_map = spacc_ctrl_map[SPACC_CTRL_VER_1]; + else + spacc->config.ctrl_map = spacc_ctrl_map[SPACC_CTRL_VER_2]; + + spacc->job_next_swid = 0; + spacc->wdcnt = 0; + spacc->config.wd_timer = SPACC_WD_TIMER_INIT; + + /* version 4.10 uses IRQ, + * above uses WD and we don't support below 4.00 + */ + if (spacc->config.version < 0x40) { + pr_debug("ERR: Unsupported SPAcc version\n"); + return -EIO; + } else if (spacc->config.version < 0x4B) { + spacc->op_mode = SPACC_OP_MODE_IRQ; + } else { + spacc->op_mode = SPACC_OP_MODE_WD; + } + + /* set threshold and enable irq + * on 4.11 and newer cores we can derive this + * from the HW reported depths. 
+ */ + if (spacc->config.stat_fifo_depth == 1) + spacc->config.ideal_stat_level = 1; + else if (spacc->config.stat_fifo_depth <= 4) + spacc->config.ideal_stat_level = + spacc->config.stat_fifo_depth - 1; + else if (spacc->config.stat_fifo_depth <= 8) + spacc->config.ideal_stat_level = + spacc->config.stat_fifo_depth - 2; + else + spacc->config.ideal_stat_level = + spacc->config.stat_fifo_depth - 4; + + /* determine max PROClen value */ + writel(0xFFFFFFFF, spacc->regmap + SPACC_REG_PROC_LEN); + spacc->config.max_msg_size = readl(spacc->regmap + SPACC_REG_PROC_LEN); + + /* read config info*/ + if (spacc->config.is_pdu) { + pr_debug("PDU:\n"); + pr_debug(" MAJOR : %u\n", info->pdu_config.major); + pr_debug(" MINOR : %u\n", info->pdu_config.minor); + } + + id = readl(spacc->regmap + SPACC_REG_ID); + pr_debug("SPACC ID: (%08lx)\n", (unsigned long)id); + pr_debug(" MAJOR : %x\n", info->spacc_version.major); + pr_debug(" MINOR : %x\n", info->spacc_version.minor); + pr_debug(" QOS : %x\n", info->spacc_version.qos); + pr_debug(" IVIMPORT : %x\n", spacc->config.is_ivimport); + + if (spacc->config.version >= 0x48) + pr_debug(" TYPE : %lx (%s)\n", SPACC_ID_TYPE(id), + version_string[SPACC_ID_TYPE(id) & 3]); + + pr_debug(" AUX : %x\n", info->spacc_version.qos); + pr_debug(" IDX : %lx %s\n", SPACC_ID_VIDX(id), + spacc->config.is_secure ? 
+ (idx_string[spacc->config.is_secure_port & 1]) : ""); + pr_debug(" PARTIAL : %x\n", info->spacc_version.partial); + pr_debug(" PROJECT : %x\n", info->spacc_version.project); + + if (spacc->config.version >= 0x48) + id = readl(spacc->regmap + SPACC_REG_CONFIG); + else + id = 0xFFFFFFFF; + + pr_debug("SPACC CFG: (%08lx)\n", id); + pr_debug(" CTX CNT : %u\n", info->spacc_config.num_ctx); + pr_debug(" VSPACC CNT : %u\n", info->spacc_config.num_vspacc); + pr_debug(" CIPH SZ : %-3lu bytes\n", 1UL << + info->spacc_config.ciph_ctx_page_size); + pr_debug(" HASH SZ : %-3lu bytes\n", 1UL << + info->spacc_config.hash_ctx_page_size); + pr_debug(" DMA TYPE : %u (%s)\n", info->spacc_config.dma_type, + dma_type_string[info->spacc_config.dma_type & 3]); + pr_debug(" MAX PROCLEN: %lu bytes\n", (unsigned long) + spacc->config.max_msg_size); + pr_debug(" FIFO CONFIG :\n"); + pr_debug(" CMD0 DEPTH: %d\n", spacc->config.cmd0_fifo_depth); + + if (spacc->config.is_qos) { + pr_debug(" CMD1 DEPTH: %d\n", + spacc->config.cmd1_fifo_depth); + pr_debug(" CMD2 DEPTH: %d\n", + spacc->config.cmd2_fifo_depth); + } + pr_debug(" STAT DEPTH: %d\n", spacc->config.stat_fifo_depth); + + if (spacc->config.dma_type == SPACC_DMA_DDT) { + writel(0x1234567F, baseaddr + SPACC_REG_DST_PTR); + writel(0xDEADBEEF, baseaddr + SPACC_REG_SRC_PTR); + + if (((readl(baseaddr + SPACC_REG_DST_PTR)) != + (0x1234567F & SPACC_DST_PTR_PTR)) || + ((readl(baseaddr + SPACC_REG_SRC_PTR)) != + (0xDEADBEEF & SPACC_SRC_PTR_PTR))) { + pr_debug("ERR: Failed to set pointers\n"); + goto ERR; + } + } + + /* zero the IRQ CTRL/EN register + * (to make sure we're in a sane state) + */ + writel(0, spacc->regmap + SPACC_REG_IRQ_CTRL); + writel(0, spacc->regmap + SPACC_REG_IRQ_EN); + writel(0xFFFFFFFF, spacc->regmap + SPACC_REG_IRQ_STAT); + + /* init cache*/ + memset(&spacc->cache, 0, sizeof(spacc->cache)); + writel(0, spacc->regmap + SPACC_REG_SRC_PTR); + writel(0, spacc->regmap + SPACC_REG_DST_PTR); + writel(0, spacc->regmap + 
SPACC_REG_PROC_LEN); + writel(0, spacc->regmap + SPACC_REG_ICV_LEN); + writel(0, spacc->regmap + SPACC_REG_ICV_OFFSET); + writel(0, spacc->regmap + SPACC_REG_PRE_AAD_LEN); + writel(0, spacc->regmap + SPACC_REG_POST_AAD_LEN); + writel(0, spacc->regmap + SPACC_REG_IV_OFFSET); + writel(0, spacc->regmap + SPACC_REG_OFFSET); + writel(0, spacc->regmap + SPACC_REG_AUX_INFO); + + spacc->ctx = vmalloc(sizeof(struct spacc_ctx) * spacc->config.num_ctx); + if (!spacc->ctx) + goto ERR; + + spacc->job = vmalloc(sizeof(struct spacc_job) * SPACC_MAX_JOBS); + if (!spacc->job) + goto ERR; + + /* initialize job_idx and lookup table */ + spacc_job_init_all(spacc); + + /* initialize contexts */ + spacc_ctx_init_all(spacc); + + /* autodetect and set string size setting*/ + if (spacc->config.version == 0x61 || spacc->config.version >= 0x65) + spacc_xof_stringsize_autodetect(spacc); + + return CRYPTO_OK; +ERR: + spacc_fini(spacc); + pr_debug("ERR: Crypto Failed\n"); + + return -EIO; +} + +/* callback function to initialize tasklet running */ +void spacc_pop_jobs(unsigned long data) +{ + int num = 0; + struct spacc_priv *priv = (struct spacc_priv *)data; + struct spacc_device *spacc = &priv->spacc; + + /* decrement the WD CNT here since + * now we're actually going to respond + * to the IRQ completely + */ + if (spacc->wdcnt) + --(spacc->wdcnt); + + spacc_pop_packets(spacc, &num); +} + +int spacc_remove(struct platform_device *pdev) +{ + struct spacc_device *spacc; + struct spacc_priv *priv = platform_get_drvdata(pdev); + + /* free test vector memory*/ + spacc = &priv->spacc; + spacc_fini(spacc); + + tasklet_kill(&priv->pop_jobs); + + /* devm functions do proper cleanup */ + pdu_mem_deinit(&pdev->dev); + dev_dbg(&pdev->dev, "removed!\n"); + + return 0; +} + +int spacc_set_key_exp(struct spacc_device *spacc, int job_idx) +{ + struct spacc_ctx *ctx = NULL; + struct spacc_job *job = NULL; + + if (job_idx < 0 || job_idx > SPACC_MAX_JOBS) { + pr_debug("ERR: Invalid Job id specified (out of 
range)\n"); + return -ENXIO; + } + + job = &spacc->job[job_idx]; + ctx = context_lookup_by_job(spacc, job_idx); + + if (!ctx) { + pr_debug("ERR: Failed to find ctx id\n"); + return -EIO; + } + + job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_KEY_EXP); + + return CRYPTO_OK; +} + +int spacc_compute_xcbc_key(struct spacc_device *spacc, int mode_id, + int job_idx, const unsigned char *key, + int keylen, unsigned char *xcbc_out) +{ + unsigned char *buf; + dma_addr_t bufphys; + struct pdu_ddt ddt; + unsigned char iv[16]; + int err, i, handle, usecbc, ctx_idx; + + if (job_idx >= 0 && job_idx < SPACC_MAX_JOBS) + ctx_idx = spacc->job[job_idx].ctx_idx; + else + ctx_idx = -1; + + if (mode_id == CRYPTO_MODE_MAC_XCBC) { + /* figure out if we can schedule the key */ + if (spacc_isenabled(spacc, CRYPTO_MODE_AES_ECB, 16)) + usecbc = 0; + else if (spacc_isenabled(spacc, CRYPTO_MODE_AES_CBC, 16)) + usecbc = 1; + else + return -1; + } else if (mode_id == CRYPTO_MODE_MAC_SM4_XCBC) { + /* figure out if we can schedule the key */ + if (spacc_isenabled(spacc, CRYPTO_MODE_SM4_ECB, 16)) + usecbc = 0; + else if (spacc_isenabled(spacc, CRYPTO_MODE_SM4_CBC, 16)) + usecbc = 1; + else + return -1; + } else { + return -1; + } + + memset(iv, 0, sizeof(iv)); + memset(&ddt, 0, sizeof(ddt)); + + buf = dma_alloc_coherent(get_ddt_device(), 64, &bufphys, GFP_KERNEL); + if (!buf) + return -EINVAL; + + handle = -1; + + /* set to 1111...., 2222...., 333... */ + for (i = 0; i < 48; i++) + buf[i] = (i >> 4) + 1; + + /* build DDT */ + err = pdu_ddt_init(&ddt, 1); + if (err) + goto xcbc_err; + + pdu_ddt_add(&ddt, bufphys, 48); + + /* open a handle in either CBC or ECB mode */ + if (mode_id == CRYPTO_MODE_MAC_XCBC) { + handle = spacc_open(spacc, (usecbc ? + CRYPTO_MODE_AES_CBC : CRYPTO_MODE_AES_ECB), + CRYPTO_MODE_NULL, ctx_idx, 0, NULL, NULL); + if (handle < 0) { + err = handle; + goto xcbc_err; + } + } else if (mode_id == CRYPTO_MODE_MAC_SM4_XCBC) { + handle = spacc_open(spacc, (usecbc ? 
+ CRYPTO_MODE_SM4_CBC : CRYPTO_MODE_SM4_ECB), + CRYPTO_MODE_NULL, ctx_idx, 0, NULL, NULL); + if (handle < 0) { + err = handle; + goto xcbc_err; + } + } + spacc_set_operation(spacc, handle, OP_ENCRYPT, 0, 0, 0, 0, 0); + + if (usecbc) { + /* we can do the ECB work in CBC using three + * jobs with the IVreset to zero each time + */ + for (i = 0; i < 3; i++) { + spacc_write_context(spacc, handle, + SPACC_CRYPTO_OPERATION, key, + keylen, iv, 16); + err = spacc_packet_enqueue_ddt(spacc, handle, &ddt, + &ddt, 16, (i * 16) | + ((i * 16) << 16), 0, 0, 0, 0); + if (err != CRYPTO_OK) + goto xcbc_err; + + do { + err = spacc_packet_dequeue(spacc, handle); + } while (err == -EINPROGRESS); + if (err != CRYPTO_OK) + goto xcbc_err; + } + } else { + /* do the 48 bytes as a single SPAcc job this is the ideal case + * but only possible if ECB was enabled in the core + */ + spacc_write_context(spacc, handle, SPACC_CRYPTO_OPERATION, + key, keylen, iv, 16); + err = spacc_packet_enqueue_ddt(spacc, handle, &ddt, &ddt, 48, + 0, 0, 0, 0, 0); + if (err != CRYPTO_OK) + goto xcbc_err; + + do { + err = spacc_packet_dequeue(spacc, handle); + } while (err == -EINPROGRESS); + if (err != CRYPTO_OK) + goto xcbc_err; + } + + /* now we can copy the key*/ + memcpy(xcbc_out, buf, 48); + memset(buf, 0, 64); + +xcbc_err: + dma_free_coherent(get_ddt_device(), 64, buf, bufphys); + pdu_ddt_free(&ddt); + if (handle >= 0) + spacc_close(spacc, handle); + + if (err) + return -EINVAL; + + return 0; +} diff --git a/drivers/crypto/dwc-spacc/spacc_core.h b/drivers/crypto/dwc-spacc/spacc_core.h new file mode 100644 index 000000000000..399b7c976151 --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_core.h @@ -0,0 +1,826 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + + +#ifndef SPACC_CORE_H_ +#define SPACC_CORE_H_ + +#include +#include +#include +#include +#include +#include "spacc_hal.h" + +enum { + SPACC_DMA_UNDEF = 0, + SPACC_DMA_DDT = 1, + SPACC_DMA_LINEAR = 2 +}; + +enum { + SPACC_OP_MODE_IRQ = 0, + SPACC_OP_MODE_WD 
= 1 /* watchdog */ +}; + +#define OP_ENCRYPT 0 +#define OP_DECRYPT 1 + +#define SPACC_CRYPTO_OPERATION 1 +#define SPACC_HASH_OPERATION 2 + +#define SPACC_AADCOPY_FLAG 0x80000000 + +#define SPACC_AUTO_SIZE (-1) + +#define SPACC_WD_LIMIT 0x80 +#define SPACC_WD_TIMER_INIT 0x40000 + +/********* Register Offsets **********/ +#define SPACC_REG_IRQ_EN 0x00000L +#define SPACC_REG_IRQ_STAT 0x00004L +#define SPACC_REG_IRQ_CTRL 0x00008L +#define SPACC_REG_FIFO_STAT 0x0000CL +#define SPACC_REG_SDMA_BRST_SZ 0x00010L + +#define SPACC_REG_SRC_PTR 0x00020L +#define SPACC_REG_DST_PTR 0x00024L +#define SPACC_REG_OFFSET 0x00028L +#define SPACC_REG_PRE_AAD_LEN 0x0002CL +#define SPACC_REG_POST_AAD_LEN 0x00030L + +#define SPACC_REG_PROC_LEN 0x00034L +#define SPACC_REG_ICV_LEN 0x00038L +#define SPACC_REG_ICV_OFFSET 0x0003CL +#define SPACC_REG_IV_OFFSET 0x00040L + +#define SPACC_REG_SW_CTRL 0x00044L +#define SPACC_REG_AUX_INFO 0x00048L +#define SPACC_REG_CTRL 0x0004CL + +#define SPACC_REG_STAT_POP 0x00050L +#define SPACC_REG_STATUS 0x00054L + +#define SPACC_REG_STAT_WD_CTRL 0x00080L + +#define SPACC_REG_KEY_SZ 0x00100L + +#define SPACC_REG_VIRTUAL_RQST 0x00140L +#define SPACC_REG_VIRTUAL_ALLOC 0x00144L +#define SPACC_REG_VIRTUAL_PRIO 0x00148L + +#define SPACC_REG_ID 0x00180L +#define SPACC_REG_CONFIG 0x00184L +#define SPACC_REG_CONFIG2 0x00190L + +#define SPACC_REG_SECURE_CTRL 0x001C0L +#define SPACC_REG_SECURE_RELEASE 0x001C4 + +#define SPACC_REG_SK_LOAD 0x00200L +#define SPACC_REG_SK_STAT 0x00204L +#define SPACC_REG_SK_KEY 0x00240L + +#define SPACC_REG_VERSION_EXT_3 0x00194L + +/* out 8MB from base of SPACC */ +#define SPACC_REG_SKP 0x800000UL + +/********** Context Offsets **********/ +#define SPACC_CTX_CIPH_KEY 0x04000L +#define SPACC_CTX_HASH_KEY 0x08000L + +/******** Sub-Context Offsets ********/ +#define SPACC_CTX_AES_KEY 0x00 +#define SPACC_CTX_AES_IV 0x20 + +#define SPACC_CTX_DES_KEY 0x08 +#define SPACC_CTX_DES_IV 0x00 + +/* use these to loop over CMDX macros */ +#define 
SPACC_CMDX_MAX 1 +#define SPACC_CMDX_MAX_QOS 3 +/********** IRQ_EN Bit Masks **********/ + +#define _SPACC_IRQ_CMD0 0 +#define _SPACC_IRQ_STAT 4 +#define _SPACC_IRQ_STAT_WD 12 +#define _SPACC_IRQ_GLBL 31 + +#define SPACC_IRQ_EN_CMD(x) (1UL << _SPACC_IRQ_CMD0 << (x)) +#define SPACC_IRQ_EN_STAT BIT(_SPACC_IRQ_STAT) +#define SPACC_IRQ_EN_STAT_WD BIT(_SPACC_IRQ_STAT_WD) +#define SPACC_IRQ_EN_GLBL BIT(_SPACC_IRQ_GLBL) + +/********* IRQ_STAT Bitmasks *********/ + +#define SPACC_IRQ_STAT_CMDX(x) (1UL << _SPACC_IRQ_CMD0 << (x)) +#define SPACC_IRQ_STAT_STAT BIT(_SPACC_IRQ_STAT) +#define SPACC_IRQ_STAT_STAT_WD BIT(_SPACC_IRQ_STAT_WD) + +#define SPACC_IRQ_STAT_CLEAR_STAT(spacc) writel(SPACC_IRQ_STAT_STAT, \ + (spacc)->regmap + SPACC_REG_IRQ_STAT) + +#define SPACC_IRQ_STAT_CLEAR_STAT_WD(spacc) writel(SPACC_IRQ_STAT_STAT_WD, \ + (spacc)->regmap + SPACC_REG_IRQ_STAT) + +#define SPACC_IRQ_STAT_CLEAR_CMDX(spacc, x) writel(SPACC_IRQ_STAT_CMDX(x), \ + (spacc)->regmap + SPACC_REG_IRQ_STAT) + +/********* IRQ_CTRL Bitmasks *********/ +/* CMD0 = 0; for QOS, CMD1 = 8, CMD2 = 16 */ +#define _SPACC_IRQ_CTRL_CMDX_CNT(x) (8 * (x)) +#define SPACC_IRQ_CTRL_CMDX_CNT_SET(x, n) \ + (((n) & 0xFF) << _SPACC_IRQ_CTRL_CMDX_CNT(x)) +#define SPACC_IRQ_CTRL_CMDX_CNT_MASK(x) \ + (0xFF << _SPACC_IRQ_CTRL_CMDX_CNT(x)) + +/* STAT_CNT is at 16 and for QOS at 24 */ +#define _SPACC_IRQ_CTRL_STAT_CNT 16 +#define SPACC_IRQ_CTRL_STAT_CNT_SET(n) ((n) << _SPACC_IRQ_CTRL_STAT_CNT) +#define SPACC_IRQ_CTRL_STAT_CNT_MASK (0x1FF << _SPACC_IRQ_CTRL_STAT_CNT) + +#define _SPACC_IRQ_CTRL_STAT_CNT_QOS 24 +#define SPACC_IRQ_CTRL_STAT_CNT_SET_QOS(n) \ + ((n) << _SPACC_IRQ_CTRL_STAT_CNT_QOS) +#define SPACC_IRQ_CTRL_STAT_CNT_MASK_QOS \ + (0x7F << _SPACC_IRQ_CTRL_STAT_CNT_QOS) + +/******** FIFO_STAT Bitmasks *********/ + +/* SPACC with QOS */ +#define SPACC_FIFO_STAT_CMDX_CNT_MASK(x) \ + (0x7F << ((x) * 8)) +#define SPACC_FIFO_STAT_CMDX_CNT_GET(x, y) \ + (((y) & SPACC_FIFO_STAT_CMDX_CNT_MASK(x)) >> ((x) * 8)) +#define 
SPACC_FIFO_STAT_CMDX_FULL(x) (1UL << (7 + (x) * 8)) + +#define _SPACC_FIFO_STAT_STAT_CNT_QOS 24 +#define SPACC_FIFO_STAT_STAT_CNT_MASK_QOS \ + (0x7F << _SPACC_FIFO_STAT_STAT_CNT_QOS) +#define SPACC_FIFO_STAT_STAT_CNT_GET_QOS(y) \ + (((y) & \ + SPACC_FIFO_STAT_STAT_CNT_MASK_QOS) >> _SPACC_FIFO_STAT_STAT_CNT_QOS) + +/* SPACC without QOS */ +#define SPACC_FIFO_STAT_CMD0_CNT_MASK (0x1FF) +#define SPACC_FIFO_STAT_CMD0_CNT_GET(y) ((y) & SPACC_FIFO_STAT_CMD0_CNT_MASK) +#define _SPACC_FIFO_STAT_CMD0_FULL 15 +#define SPACC_FIFO_STAT_CMD0_FULL BIT(_SPACC_FIFO_STAT_CMD0_FULL) + +#define _SPACC_FIFO_STAT_STAT_CNT 16 +#define SPACC_FIFO_STAT_STAT_CNT_MASK (0x1FF << _SPACC_FIFO_STAT_STAT_CNT) +#define SPACC_FIFO_STAT_STAT_CNT_GET(y) \ + (((y) & SPACC_FIFO_STAT_STAT_CNT_MASK) >> _SPACC_FIFO_STAT_STAT_CNT) + +/* both */ +#define _SPACC_FIFO_STAT_STAT_EMPTY 31 +#define SPACC_FIFO_STAT_STAT_EMPTY BIT(_SPACC_FIFO_STAT_STAT_EMPTY) + +/********* SRC/DST_PTR Bitmasks **********/ + +#define SPACC_SRC_PTR_PTR 0xFFFFFFF8 +#define SPACC_DST_PTR_PTR 0xFFFFFFF8 + +/********** OFFSET Bitmasks **********/ + +#define SPACC_OFFSET_SRC_O 0 +#define SPACC_OFFSET_SRC_W 16 +#define SPACC_OFFSET_DST_O 16 +#define SPACC_OFFSET_DST_W 16 + +#define SPACC_MIN_CHUNK_SIZE 1024 +#define SPACC_MAX_CHUNK_SIZE 16384 + +/********* PKT_LEN Bitmasks **********/ + +#ifndef _SPACC_PKT_LEN_PROC_LEN +#define _SPACC_PKT_LEN_PROC_LEN 0 +#endif +#ifndef _SPACC_PKT_LEN_AAD_LEN +#define _SPACC_PKT_LEN_AAD_LEN 16 +#endif + +/********* SW_CTRL Bitmasks ***********/ + +#define _SPACC_SW_CTRL_ID_0 0 +#define SPACC_SW_CTRL_ID_W 8 +#define SPACC_SW_CTRL_ID_MASK (0xFF << _SPACC_SW_CTRL_ID_0) +#define SPACC_SW_CTRL_ID_GET(y) \ + (((y) & SPACC_SW_CTRL_ID_MASK) >> _SPACC_SW_CTRL_ID_0) +#define SPACC_SW_CTRL_ID_SET(id) \ + (((id) & SPACC_SW_CTRL_ID_MASK) >> _SPACC_SW_CTRL_ID_0) + +#define _SPACC_SW_CTRL_PRIO 30 +#define SPACC_SW_CTRL_PRIO_MASK 0x3 +#define SPACC_SW_CTRL_PRIO_SET(prio) \ + (((prio) & SPACC_SW_CTRL_PRIO_MASK) << 
_SPACC_SW_CTRL_PRIO) + +/* Priorities */ +#define SPACC_SW_CTRL_PRIO_HI 0 +#define SPACC_SW_CTRL_PRIO_MED 1 +#define SPACC_SW_CTRL_PRIO_LOW 2 + +/*********** SECURE_CTRL bitmasks *********/ +#define _SPACC_SECURE_CTRL_MS_SRC 0 +#define _SPACC_SECURE_CTRL_MS_DST 1 +#define _SPACC_SECURE_CTRL_MS_DDT 2 +#define _SPACC_SECURE_CTRL_LOCK 31 + +#define SPACC_SECURE_CTRL_MS_SRC BIT(_SPACC_SECURE_CTRL_MS_SRC) +#define SPACC_SECURE_CTRL_MS_DST BIT(_SPACC_SECURE_CTRL_MS_DST) +#define SPACC_SECURE_CTRL_MS_DDT BIT(_SPACC_SECURE_CTRL_MS_DDT) +#define SPACC_SECURE_CTRL_LOCK BIT(_SPACC_SECURE_CTRL_LOCK) + +/********* SKP bits **************/ +#define _SPACC_SK_LOAD_CTX_IDX 0 +#define _SPACC_SK_LOAD_ALG 8 +#define _SPACC_SK_LOAD_MODE 12 +#define _SPACC_SK_LOAD_SIZE 16 +#define _SPACC_SK_LOAD_ENC_EN 30 +#define _SPACC_SK_LOAD_DEC_EN 31 +#define _SPACC_SK_STAT_BUSY 0 + +#define SPACC_SK_LOAD_ENC_EN BIT(_SPACC_SK_LOAD_ENC_EN) +#define SPACC_SK_LOAD_DEC_EN BIT(_SPACC_SK_LOAD_DEC_EN) +#define SPACC_SK_STAT_BUSY BIT(_SPACC_SK_STAT_BUSY) + +/*********** CTRL Bitmasks ***********/ +/* These CTRL field locations vary with SPACC version + * and if they are used, they should be set accordingly + */ +#define _SPACC_CTRL_CIPH_ALG 0 +#define _SPACC_CTRL_HASH_ALG 4 +#define _SPACC_CTRL_CIPH_MODE 8 +#define _SPACC_CTRL_HASH_MODE 12 +#define _SPACC_CTRL_MSG_BEGIN 14 +#define _SPACC_CTRL_MSG_END 15 +#define _SPACC_CTRL_CTX_IDX 16 +#define _SPACC_CTRL_ENCRYPT 24 +#define _SPACC_CTRL_AAD_COPY 25 +#define _SPACC_CTRL_ICV_PT 26 +#define _SPACC_CTRL_ICV_ENC 27 +#define _SPACC_CTRL_ICV_APPEND 28 +#define _SPACC_CTRL_KEY_EXP 29 +#define _SPACC_CTRL_SEC_KEY 31 + +/* CTRL bitmasks for 4.15+ cores */ +#define _SPACC_CTRL_CIPH_ALG_415 0 +#define _SPACC_CTRL_HASH_ALG_415 3 +#define _SPACC_CTRL_CIPH_MODE_415 8 +#define _SPACC_CTRL_HASH_MODE_415 12 + +/********* Virtual Spacc Priority Bitmasks **********/ +#define _SPACC_VPRIO_MODE 0 +#define _SPACC_VPRIO_WEIGHT 8 + +/********* AUX INFO Bitmasks *********/ 
+#define _SPACC_AUX_INFO_DIR 0 +#define _SPACC_AUX_INFO_BIT_ALIGN 1 +#define _SPACC_AUX_INFO_CBC_CS 16 + +/********* STAT_POP Bitmasks *********/ +#define _SPACC_STAT_POP_POP 0 +#define SPACC_STAT_POP_POP BIT(_SPACC_STAT_POP_POP) + +/********** STATUS Bitmasks **********/ +#define _SPACC_STATUS_SW_ID 0 +#define _SPACC_STATUS_RET_CODE 24 +#define _SPACC_STATUS_SEC_CMD 31 +#define SPACC_GET_STATUS_RET_CODE(s) \ + (((s) >> _SPACC_STATUS_RET_CODE) & 0x7) + +#define SPACC_STATUS_SW_ID_MASK (0xFF << _SPACC_STATUS_SW_ID) +#define SPACC_STATUS_SW_ID_GET(y) \ + (((y) & SPACC_STATUS_SW_ID_MASK) >> _SPACC_STATUS_SW_ID) + +/********** KEY_SZ Bitmasks **********/ +#define _SPACC_KEY_SZ_SIZE 0 +#define _SPACC_KEY_SZ_CTX_IDX 8 +#define _SPACC_KEY_SZ_CIPHER 31 + +#define SPACC_KEY_SZ_CIPHER BIT(_SPACC_KEY_SZ_CIPHER) + +#define SPACC_SET_CIPHER_KEY_SZ(z) \ + (((z) << _SPACC_KEY_SZ_SIZE) | (1UL << _SPACC_KEY_SZ_CIPHER)) +#define SPACC_SET_HASH_KEY_SZ(z) ((z) << _SPACC_KEY_SZ_SIZE) +#define SPACC_SET_KEY_CTX(ctx) ((ctx) << _SPACC_KEY_SZ_CTX_IDX) + +/*****************************************************************************/ + +#define AUX_DIR(a) ((a) << _SPACC_AUX_INFO_DIR) +#define AUX_BIT_ALIGN(a) ((a) << _SPACC_AUX_INFO_BIT_ALIGN) +#define AUX_CBC_CS(a) ((a) << _SPACC_AUX_INFO_CBC_CS) + +#define VPRIO_SET(mode, weight) \ + (((mode) << _SPACC_VPRIO_MODE) | ((weight) << _SPACC_VPRIO_WEIGHT)) + +#ifndef MAX_DDT_ENTRIES +/* add one for null at end of list */ +#define MAX_DDT_ENTRIES \ + ((SPACC_MAX_MSG_MALLOC_SIZE / SPACC_MAX_PARTICLE_SIZE) + 1) +#endif + +#define DDT_ENTRY_SIZE (sizeof(ddt_entry) * MAX_DDT_ENTRIES) + +#ifndef SPACC_MAX_JOBS +#define SPACC_MAX_JOBS BIT(SPACC_SW_CTRL_ID_W) +#endif + +#if SPACC_MAX_JOBS > 256 +# error SPACC_MAX_JOBS cannot exceed 256. 
+#endif + +#ifndef SPACC_MAX_JOB_BUFFERS +#define SPACC_MAX_JOB_BUFFERS 192 +#endif + +#define CRYPTO_USED_JB 256 + +/* max DDT particle size */ +#ifndef SPACC_MAX_PARTICLE_SIZE +#define SPACC_MAX_PARTICLE_SIZE 4096 +#endif + +/* max message size from HW configuration */ +/* usually defined in ICD as (2 exponent 16) -1 */ +#ifndef _SPACC_MAX_MSG_MALLOC_SIZE +#define _SPACC_MAX_MSG_MALLOC_SIZE 16 +#endif +#define SPACC_MAX_MSG_MALLOC_SIZE BIT(_SPACC_MAX_MSG_MALLOC_SIZE) + +#ifndef SPACC_MAX_MSG_SIZE +#define SPACC_MAX_MSG_SIZE (SPACC_MAX_MSG_MALLOC_SIZE - 1) +#endif + +#define SPACC_LOOP_WAIT 1000000 +#define SPACC_CTR_IV_MAX8 ((u32)0xFF) +#define SPACC_CTR_IV_MAX16 ((u32)0xFFFF) +#define SPACC_CTR_IV_MAX32 ((u32)0xFFFFFFFF) +#define SPACC_CTR_IV_MAX64 ((u64)0xFFFFFFFFFFFFFFFF) + +/* cipher algos */ +enum ecipher { + C_NULL = 0, + C_DES = 1, + C_AES = 2, + C_RC4 = 3, + C_MULTI2 = 4, + C_KASUMI = 5, + C_SNOW3G_UEA2 = 6, + C_ZUC_UEA3 = 7, + C_CHACHA20 = 8, + C_SM4 = 9, + C_MAX = 10 +}; + +/* ctrl reg cipher modes */ +enum eciphermode { + CM_ECB = 0, + CM_CBC = 1, + CM_CTR = 2, + CM_CCM = 3, + CM_GCM = 5, + CM_OFB = 7, + CM_CFB = 8, + CM_F8 = 9, + CM_XTS = 10, + CM_MAX = 11 +}; + +enum echachaciphermode { + CM_CHACHA_STREAM = 2, + CM_CHACHA_AEAD = 5 +}; + +enum ehash { + H_NULL = 0, + H_MD5 = 1, + H_SHA1 = 2, + H_SHA224 = 3, + H_SHA256 = 4, + H_SHA384 = 5, + H_SHA512 = 6, + H_XCBC = 7, + H_CMAC = 8, + H_KF9 = 9, + H_SNOW3G_UIA2 = 10, + H_CRC32_I3E802_3 = 11, + H_ZUC_UIA3 = 12, + H_SHA512_224 = 13, + H_SHA512_256 = 14, + H_MICHAEL = 15, + H_SHA3_224 = 16, + H_SHA3_256 = 17, + H_SHA3_384 = 18, + H_SHA3_512 = 19, + H_SHAKE128 = 20, + H_SHAKE256 = 21, + H_POLY1305 = 22, + H_SM3 = 23, + H_SM4_XCBC_MAC = 24, + H_SM4_CMAC = 25, + H_MAX = 26 +}; + +enum ehashmode { + HM_RAW = 0, + HM_SSLMAC = 1, + HM_HMAC = 2, + HM_MAX = 3 +}; + +enum eshakehashmode { + HM_SHAKE_SHAKE = 0, + HM_SHAKE_CSHAKE = 1, + HM_SHAKE_KMAC = 2 +}; + +enum spacc_ret_code { + SPACC_OK = 0, + SPACC_ICVFAIL = 
1, + SPACC_MEMERR = 2, + SPACC_BLOCKERR = 3, + SPACC_SECERR = 4 +}; + +enum eicvpos { + IP_ICV_OFFSET = 0, + IP_ICV_APPEND = 1, + IP_ICV_IGNORE = 2, + IP_MAX = 3 +}; + +enum { + /* HASH of plaintext */ + ICV_HASH = 0, + /* HASH the plaintext and encrypt the plaintext and ICV */ + ICV_HASH_ENCRYPT = 1, + /* HASH the ciphertext */ + ICV_ENCRYPT_HASH = 2, + ICV_IGNORE = 3, + IM_MAX = 4 +}; + +enum { + NO_PARTIAL_PCK = 0, + FIRST_PARTIAL_PCK = 1, + MIDDLE_PARTIAL_PCK = 2, + LAST_PARTIAL_PCK = 3 +}; + +enum crypto_modes { + CRYPTO_MODE_NULL, + CRYPTO_MODE_AES_ECB, + CRYPTO_MODE_AES_CBC, + CRYPTO_MODE_AES_CTR, + CRYPTO_MODE_AES_CCM, + CRYPTO_MODE_AES_GCM, + CRYPTO_MODE_AES_F8, + CRYPTO_MODE_AES_XTS, + CRYPTO_MODE_AES_CFB, + CRYPTO_MODE_AES_OFB, + CRYPTO_MODE_AES_CS1, + CRYPTO_MODE_AES_CS2, + CRYPTO_MODE_AES_CS3, + CRYPTO_MODE_MULTI2_ECB, + CRYPTO_MODE_MULTI2_CBC, + CRYPTO_MODE_MULTI2_OFB, + CRYPTO_MODE_MULTI2_CFB, + CRYPTO_MODE_3DES_CBC, + CRYPTO_MODE_3DES_ECB, + CRYPTO_MODE_DES_CBC, + CRYPTO_MODE_DES_ECB, + CRYPTO_MODE_KASUMI_ECB, + CRYPTO_MODE_KASUMI_F8, + CRYPTO_MODE_SNOW3G_UEA2, + CRYPTO_MODE_ZUC_UEA3, + CRYPTO_MODE_CHACHA20_STREAM, + CRYPTO_MODE_CHACHA20_POLY1305, + CRYPTO_MODE_SM4_ECB, + CRYPTO_MODE_SM4_CBC, + CRYPTO_MODE_SM4_CFB, + CRYPTO_MODE_SM4_OFB, + CRYPTO_MODE_SM4_CTR, + CRYPTO_MODE_SM4_CCM, + CRYPTO_MODE_SM4_GCM, + CRYPTO_MODE_SM4_F8, + CRYPTO_MODE_SM4_XTS, + CRYPTO_MODE_SM4_CS1, + CRYPTO_MODE_SM4_CS2, + CRYPTO_MODE_SM4_CS3, + + CRYPTO_MODE_HASH_MD5, + CRYPTO_MODE_HMAC_MD5, + CRYPTO_MODE_HASH_SHA1, + CRYPTO_MODE_HMAC_SHA1, + CRYPTO_MODE_HASH_SHA224, + CRYPTO_MODE_HMAC_SHA224, + CRYPTO_MODE_HASH_SHA256, + CRYPTO_MODE_HMAC_SHA256, + CRYPTO_MODE_HASH_SHA384, + CRYPTO_MODE_HMAC_SHA384, + CRYPTO_MODE_HASH_SHA512, + CRYPTO_MODE_HMAC_SHA512, + CRYPTO_MODE_HASH_SHA512_224, + CRYPTO_MODE_HMAC_SHA512_224, + CRYPTO_MODE_HASH_SHA512_256, + CRYPTO_MODE_HMAC_SHA512_256, + + CRYPTO_MODE_MAC_XCBC, + CRYPTO_MODE_MAC_CMAC, + CRYPTO_MODE_MAC_KASUMI_F9, + 
CRYPTO_MODE_MAC_SNOW3G_UIA2, + CRYPTO_MODE_MAC_ZUC_UIA3, + CRYPTO_MODE_MAC_POLY1305, + + CRYPTO_MODE_SSLMAC_MD5, + CRYPTO_MODE_SSLMAC_SHA1, + CRYPTO_MODE_HASH_CRC32, + CRYPTO_MODE_MAC_MICHAEL, + + CRYPTO_MODE_HASH_SHA3_224, + CRYPTO_MODE_HASH_SHA3_256, + CRYPTO_MODE_HASH_SHA3_384, + CRYPTO_MODE_HASH_SHA3_512, + + CRYPTO_MODE_HASH_SHAKE128, + CRYPTO_MODE_HASH_SHAKE256, + CRYPTO_MODE_HASH_CSHAKE128, + CRYPTO_MODE_HASH_CSHAKE256, + CRYPTO_MODE_MAC_KMAC128, + CRYPTO_MODE_MAC_KMAC256, + CRYPTO_MODE_MAC_KMACXOF128, + CRYPTO_MODE_MAC_KMACXOF256, + + CRYPTO_MODE_HASH_SM3, + CRYPTO_MODE_HMAC_SM3, + CRYPTO_MODE_MAC_SM4_XCBC, + CRYPTO_MODE_MAC_SM4_CMAC, + + CRYPTO_MODE_LAST +}; + +/* job descriptor */ +typedef void (*spacc_callback)(void *spacc_dev, void *data); + +struct spacc_job { + unsigned long + enc_mode, /* Encryption Algorithm mode */ + hash_mode, /* HASH Algorithm mode */ + icv_len, + icv_offset, + op, /* Operation */ + ctrl, /* CTRL shadow register */ + + /* context just initialized or taken, + * and this is the first use. 
+ */ + first_use, + pre_aad_sz, post_aad_sz, /* size of AAD for the latest packet*/ + hkey_sz, + ckey_sz; + + /* Direction and bit alignment parameters for the AUX_INFO reg */ + unsigned int auxinfo_dir, auxinfo_bit_align; + unsigned int auxinfo_cs_mode; /* AUX info setting for CBC-CS */ + + u32 ctx_idx; + unsigned int job_used, job_swid, job_done, job_err, job_secure; + spacc_callback cb; + void *cbdata; + +}; + +#define SPACC_CTX_IDX_UNUSED 0xFFFFFFFF +#define SPACC_JOB_IDX_UNUSED 0xFFFFFFFF + +struct spacc_ctx { + /* Memory context to store cipher keys*/ + void __iomem *ciph_key; + /* Memory context to store hash keys*/ + void __iomem *hash_key; + /* reference count of jobs using this context */ + int ref_cnt; + /* number of contexts following related to this one */ + int ncontig; +}; + +#define SPACC_CTRL_MASK(field) \ + (1UL << spacc->config.ctrl_map[(field)]) +#define SPACC_CTRL_SET(field, value) \ + ((value) << spacc->config.ctrl_map[(field)]) + +enum { + SPACC_CTRL_VER_0, + SPACC_CTRL_VER_1, + SPACC_CTRL_VER_2, + SPACC_CTRL_VER_SIZE +}; + +enum { + SPACC_CTRL_CIPH_ALG, + SPACC_CTRL_CIPH_MODE, + SPACC_CTRL_HASH_ALG, + SPACC_CTRL_HASH_MODE, + SPACC_CTRL_ENCRYPT, + SPACC_CTRL_CTX_IDX, + SPACC_CTRL_SEC_KEY, + SPACC_CTRL_AAD_COPY, + SPACC_CTRL_ICV_PT, + SPACC_CTRL_ICV_ENC, + SPACC_CTRL_ICV_APPEND, + SPACC_CTRL_KEY_EXP, + SPACC_CTRL_MSG_BEGIN, + SPACC_CTRL_MSG_END, + SPACC_CTRL_MAPSIZE +}; + +struct spacc_device { + void __iomem *regmap; + int zero_key; + + /* hardware configuration */ + struct { + unsigned int version, + pdu_version, + project; + uint32_t max_msg_size; /* max PROCLEN value */ + + unsigned char modes[CRYPTO_MODE_LAST]; + + int num_ctx, /* no. of contexts */ + num_sec_ctx, /* no. of SKP contexts*/ + sec_ctx_page_size, /* page size of SKP context in bytes*/ + ciph_page_size, /* cipher context page size in bytes*/ + hash_page_size, /* hash context page size in bytes*/ + string_size, + is_qos, /* QOS spacc? */ + is_pdu, /* PDU spacc? 
*/ + is_secure, + is_secure_port, /* Are we on the secure port? */ + is_partial, /* Is partial processing enabled? */ + is_ivimport, /* is ivimport enabled? */ + dma_type, /* DMA type: linear or scattergather */ + idx, /* Which virtual spacc IDX is this? */ + priority, /* Weighted priority of virtual spacc */ + cmd0_fifo_depth, /* CMD FIFO depths */ + cmd1_fifo_depth, + cmd2_fifo_depth, + stat_fifo_depth, /* depth of STATUS FIFO */ + fifo_cnt, + ideal_stat_level, + spacc_endian; + + uint32_t wd_timer; + u64 oldtimer, timer; + + const u8 *ctrl_map; /* map of ctrl register field offsets */ + } config; + + struct spacc_job_buffer { + int active; + int job_idx; + struct pdu_ddt *src, *dst; + u32 proc_sz, aad_offset, pre_aad_sz, + post_aad_sz, iv_offset, prio; + } job_buffer[SPACC_MAX_JOB_BUFFERS]; + + int jb_head, jb_tail; + + int op_mode, /* operating mode and watchdog functionality */ + wdcnt; /* number of pending WD IRQs*/ + + /* SW_ID value which will be used for next job. */ + unsigned int job_next_swid; + + struct spacc_ctx *ctx; /* This size changes per configured device */ + struct spacc_job *job; /* allocate memory for [SPACC_MAX_JOBS]; */ + int job_lookup[SPACC_MAX_JOBS]; /* correlate SW_ID back to job index */ + + spinlock_t lock; /* lock for register access */ + spinlock_t ctx_lock; /* lock for context manager */ + + /* callback functions for IRQ processing */ + void (*irq_cb_cmdx)(struct spacc_device *spacc, int x); + void (*irq_cb_stat)(struct spacc_device *spacc); + void (*irq_cb_stat_wd)(struct spacc_device *spacc); + + /* this is called after jobs have been popped off the STATUS FIFO + * useful so you can be told when there might be space available + * in the CMD FIFO + */ + void (*spacc_notify_jobs)(struct spacc_device *spacc); + + /* cache*/ + struct { + u32 src_ptr, + dst_ptr, + proc_len, + icv_len, + icv_offset, + pre_aad, + post_aad, + iv_offset, + offset, + aux; + } cache; + + struct device *dptr; +}; + +enum { + SPACC_IRQ_MODE_WD = 1, /* use 
WD*/ + SPACC_IRQ_MODE_STEP = 2 /* older use CMD/STAT stepping */ +}; + +enum { + SPACC_IRQ_CMD_GET = 0, + SPACC_IRQ_CMD_SET = 1 +}; + +struct spacc_priv { + struct spacc_device spacc; + struct semaphore core_running; + struct tasklet_struct pop_jobs; + spinlock_t hw_lock; + unsigned long max_msg_len; +}; + + +int spacc_open(struct spacc_device *spacc, int enc, int hash, int ctx, + int secure_mode, spacc_callback cb, void *cbdata); +int spacc_clone_handle(struct spacc_device *spacc, int old_handle, + void *cbdata); +int spacc_close(struct spacc_device *spacc, int job_idx); +int spacc_set_operation(struct spacc_device *spacc, int job_idx, int op, + u32 prot, uint32_t icvcmd, uint32_t icvoff, + uint32_t icvsz, uint32_t sec_key); +int spacc_set_key_exp(struct spacc_device *spacc, int job_idx); + +int spacc_packet_enqueue_ddt_ex(struct spacc_device *spacc, int use_jb, + int job_idx, struct pdu_ddt *src_ddt, struct pdu_ddt *dst_ddt, + u32 proc_sz, uint32_t aad_offset, uint32_t pre_aad_sz, + u32 post_aad_sz, uint32_t iv_offset, uint32_t prio); +int spacc_packet_enqueue_ddt(struct spacc_device *spacc, int job_idx, + struct pdu_ddt *src_ddt, struct pdu_ddt *dst_ddt, + uint32_t proc_sz, u32 aad_offset, uint32_t pre_aad_sz, + uint32_t post_aad_sz, u32 iv_offset, uint32_t prio); + +/* IRQ handling functions */ +void spacc_irq_cmdx_enable(struct spacc_device *spacc, int cmdx, int cmdx_cnt); +void spacc_irq_cmdx_disable(struct spacc_device *spacc, int cmdx); +void spacc_irq_stat_enable(struct spacc_device *spacc, int stat_cnt); +void spacc_irq_stat_disable(struct spacc_device *spacc); +void spacc_irq_stat_wd_enable(struct spacc_device *spacc); +void spacc_irq_stat_wd_disable(struct spacc_device *spacc); +void spacc_irq_glbl_enable(struct spacc_device *spacc); +void spacc_irq_glbl_disable(struct spacc_device *spacc); +uint32_t spacc_process_irq(struct spacc_device *spacc); +void spacc_set_wd_count(struct spacc_device *spacc, uint32_t val); +irqreturn_t spacc_irq_handler(int irq, 
void *dev); +int spacc_sgs_to_ddt(struct device *dev, + struct scatterlist *sg1, int len1, int *ents1, + struct scatterlist *sg2, int len2, int *ents2, + struct scatterlist *sg3, int len3, int *ents3, + struct pdu_ddt *ddt, int dma_direction); +int spacc_sg_to_ddt(struct device *dev, struct scatterlist *sg, + int nbytes, struct pdu_ddt *ddt, int dma_direction); + +/* Context Manager */ +void spacc_ctx_init_all(struct spacc_device *spacc); + +/* SPAcc specific manipulation of context memory */ +int spacc_write_context(struct spacc_device *spacc, int job_idx, int op, + const unsigned char *key, int ksz, + const unsigned char *iv, int ivsz); + +int spacc_read_context(struct spacc_device *spacc, int job_idx, int op, + unsigned char *key, int ksz, unsigned char *iv, + int ivsz); + +/* Job Manager */ +void spacc_job_init_all(struct spacc_device *spacc); +int spacc_job_request(struct spacc_device *dev, int job_idx); +int spacc_job_release(struct spacc_device *dev, int job_idx); +int spacc_handle_release(struct spacc_device *spacc, int job_idx); + +/* Helper functions */ +struct spacc_ctx *context_lookup_by_job(struct spacc_device *spacc, + int job_idx); +int spacc_isenabled(struct spacc_device *spacc, int mode, int keysize); +int spacc_compute_xcbc_key(struct spacc_device *spacc, int mode_id, + int job_idx, const unsigned char *key, + int keylen, unsigned char *xcbc_out); + +int spacc_process_jb(struct spacc_device *spacc); +int spacc_remove(struct platform_device *pdev); +int spacc_static_config(struct spacc_device *spacc); +int spacc_autodetect(struct spacc_device *spacc); +void spacc_pop_jobs(unsigned long data); +void spacc_fini(struct spacc_device *spacc); +int spacc_init(void __iomem *baseaddr, struct spacc_device *spacc, + struct pdu_info *info); +int spacc_pop_packets(struct spacc_device *spacc, int *num_popped); +void spacc_stat_process(struct spacc_device *spacc); +void spacc_cmd_process(struct spacc_device *spacc, int x); + +#endif diff --git 
a/drivers/crypto/dwc-spacc/spacc_device.c b/drivers/crypto/dwc-spacc/spacc_device.c new file mode 100644 index 000000000000..964ccdf294e3 --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_device.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include "spacc_device.h" + +static struct platform_device *spacc_pdev[MAX_DEVICES]; + +#define VSPACC_PRIORITY_MAX 15 + +void spacc_cmd_process(struct spacc_device *spacc, int x) +{ + struct spacc_priv *priv = container_of(spacc, struct spacc_priv, spacc); + + /* run tasklet to pop jobs off fifo */ + tasklet_schedule(&priv->pop_jobs); +} +void spacc_stat_process(struct spacc_device *spacc) +{ + struct spacc_priv *priv = container_of(spacc, struct spacc_priv, spacc); + + /* run tasklet to pop jobs off fifo */ + tasklet_schedule(&priv->pop_jobs); +} + + +int spacc_probe(struct platform_device *pdev, + const struct of_device_id snps_spacc_id[]) +{ + int spacc_idx = -1; + struct resource *mem; + int spacc_endian = 0; + void __iomem *baseaddr; + struct pdu_info info; + int spacc_priority = -1; + struct spacc_priv *priv; + int x = 0, err, oldmode, irq_num; + const struct of_device_id *match, *id; + u64 oldtimer = 100000, timer = 100000; + + if (pdev->dev.of_node) { + id = of_match_node(snps_spacc_id, pdev->dev.of_node); + if (!id) { + dev_err(&pdev->dev, "DT node did not match\n"); + return -EINVAL; + } + } + + /* Initialize DDT DMA pools based on this device's resources */ + if (pdu_mem_init(&pdev->dev)) { + dev_err(&pdev->dev, "Could not initialize DMA pools\n"); + return -ENOMEM; + } + + match = of_match_device(of_match_ptr(snps_spacc_id), &pdev->dev); + if (!match) { + dev_err(&pdev->dev, "SPAcc dtb missing"); + return -ENODEV; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no memory resource for spacc\n"); + err = -ENXIO; + goto free_ddt_mem_pool; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if 
(!priv) { + err = -ENOMEM; + goto free_ddt_mem_pool; + } + + /* Read spacc priority and index and save inside priv.spacc.config */ + if (of_property_read_u32(pdev->dev.of_node, "spacc_priority", + &spacc_priority)) { + dev_err(&pdev->dev, "No vspacc priority specified\n"); + err = -EINVAL; + goto free_ddt_mem_pool; + } + + if (spacc_priority < 0 || spacc_priority > VSPACC_PRIORITY_MAX) { + dev_err(&pdev->dev, "Invalid vspacc priority\n"); + err = -EINVAL; + goto free_ddt_mem_pool; + } + priv->spacc.config.priority = spacc_priority; + + if (of_property_read_u32(pdev->dev.of_node, "spacc_index", + &spacc_idx)) { + dev_err(&pdev->dev, "No vspacc index specified\n"); + err = -EINVAL; + goto free_ddt_mem_pool; + } + priv->spacc.config.idx = spacc_idx; + + if (of_property_read_u32(pdev->dev.of_node, "spacc_endian", + &spacc_endian)) { + dev_dbg(&pdev->dev, "No spacc_endian specified\n"); + dev_dbg(&pdev->dev, "Default spacc Endianness (0==little)\n"); + spacc_endian = 0; + } + priv->spacc.config.spacc_endian = spacc_endian; + + if (of_property_read_u64(pdev->dev.of_node, "oldtimer", + &oldtimer)) { + dev_dbg(&pdev->dev, "No oldtimer specified\n"); + dev_dbg(&pdev->dev, "Default oldtimer (100000)\n"); + oldtimer = 100000; + } + priv->spacc.config.oldtimer = oldtimer; + + if (of_property_read_u64(pdev->dev.of_node, "timer", &timer)) { + dev_dbg(&pdev->dev, "No timer specified\n"); + dev_dbg(&pdev->dev, "Default timer (100000)\n"); + timer = 100000; + } + priv->spacc.config.timer = timer; + + baseaddr = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(baseaddr)) { + dev_err(&pdev->dev, "unable to map iomem\n"); + err = PTR_ERR(baseaddr); + goto free_ddt_mem_pool; + } + + pdu_get_version(baseaddr, &info); + if (pdev->dev.platform_data) { + struct pdu_info *parent_info = pdev->dev.platform_data; + + memcpy(&info.pdu_config, &parent_info->pdu_config, + sizeof(info.pdu_config)); + } + + dev_dbg(&pdev->dev, "EPN %04X : virt [%d]\n", + info.spacc_version.project, + 
info.spacc_version.vspacc_idx); + + /* Validate virtual spacc index with vspacc count read from + * VERSION_EXT.VSPACC_CNT. Thus vspacc count=3, gives valid index 0,1,2 + */ + if (spacc_idx != info.spacc_version.vspacc_idx) { + dev_err(&pdev->dev, "DTS vspacc_idx mismatch read value\n"); + err = -EINVAL; + goto free_ddt_mem_pool; + } + + if (spacc_idx < 0 || spacc_idx > (info.spacc_config.num_vspacc - 1)) { + dev_err(&pdev->dev, "Invalid vspacc index specified\n"); + err = -EINVAL; + goto free_ddt_mem_pool; + } + + err = spacc_init(baseaddr, &priv->spacc, &info); + if (err != CRYPTO_OK) { + dev_err(&pdev->dev, "Failed to initialize device %d\n", x); + err = -ENXIO; + goto free_ddt_mem_pool; + } + + spin_lock_init(&priv->hw_lock); + spacc_irq_glbl_disable(&priv->spacc); + tasklet_init(&priv->pop_jobs, spacc_pop_jobs, (unsigned long)priv); + + priv->spacc.dptr = &pdev->dev; + platform_set_drvdata(pdev, priv); + + irq_num = platform_get_irq(pdev, 0); + if (irq_num < 0) { + dev_err(&pdev->dev, "no irq resource for spacc\n"); + err = -ENXIO; + goto free_ddt_mem_pool; + } + + /* Determine configured maximum message length. 
*/ + priv->max_msg_len = priv->spacc.config.max_msg_size; + + if (devm_request_irq(&pdev->dev, irq_num, spacc_irq_handler, + IRQF_SHARED, dev_name(&pdev->dev), + &pdev->dev)) { + dev_err(&pdev->dev, "failed to request IRQ\n"); + err = -EBUSY; + goto err_tasklet_kill; + } + + priv->spacc.irq_cb_stat = spacc_stat_process; + priv->spacc.irq_cb_cmdx = spacc_cmd_process; + oldmode = priv->spacc.op_mode; + priv->spacc.op_mode = SPACC_OP_MODE_IRQ; + + spacc_irq_stat_enable(&priv->spacc, 1); + spacc_irq_cmdx_enable(&priv->spacc, 0, 1); + spacc_irq_stat_wd_disable(&priv->spacc); + spacc_irq_glbl_enable(&priv->spacc); + + +#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AUTODETECT) + err = spacc_autodetect(&priv->spacc); + if (err < 0) { + spacc_irq_glbl_disable(&priv->spacc); + goto err_tasklet_kill; + } +#else + err = spacc_static_config(&priv->spacc); + if (err < 0) { + spacc_irq_glbl_disable(&priv->spacc); + goto err_tasklet_kill; + } +#endif + + priv->spacc.op_mode = oldmode; + + if (priv->spacc.op_mode == SPACC_OP_MODE_IRQ) { + priv->spacc.irq_cb_stat = spacc_stat_process; + priv->spacc.irq_cb_cmdx = spacc_cmd_process; + + spacc_irq_stat_enable(&priv->spacc, 1); + spacc_irq_cmdx_enable(&priv->spacc, 0, 1); + spacc_irq_glbl_enable(&priv->spacc); + } else { + priv->spacc.irq_cb_stat = spacc_stat_process; + priv->spacc.irq_cb_stat_wd = spacc_stat_process; + + spacc_irq_stat_enable(&priv->spacc, + priv->spacc.config.ideal_stat_level); + + spacc_irq_cmdx_disable(&priv->spacc, 0); + spacc_irq_stat_wd_enable(&priv->spacc); + spacc_irq_glbl_enable(&priv->spacc); + + /* enable the wd by setting the wd_timer = 100000 */ + spacc_set_wd_count(&priv->spacc, + priv->spacc.config.wd_timer = + priv->spacc.config.timer); + } + + /* unlock normal*/ + if (priv->spacc.config.is_secure_port) { + u32 t; + + t = readl(baseaddr + SPACC_REG_SECURE_CTRL); + t &= ~(1UL << 31); + writel(t, baseaddr + SPACC_REG_SECURE_CTRL); + } + + /* unlock device by default */ + writel(0, baseaddr + 
SPACC_REG_SECURE_CTRL); + + return err; + +err_tasklet_kill: + tasklet_kill(&priv->pop_jobs); + spacc_fini(&priv->spacc); + +free_ddt_mem_pool: + pdu_mem_deinit(&pdev->dev); + + return err; +} + +static void spacc_unregister_algs(void) +{ +#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_HASH) + spacc_unregister_hash_algs(); +#endif +#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AEAD) + spacc_unregister_aead_algs(); +#endif +#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_CIPHER) + spacc_unregister_cipher_algs(); +#endif +} + +static const struct of_device_id snps_spacc_id[] = { + {.compatible = "snps-dwc-spacc" }, + { /*sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, snps_spacc_id); + +static int spacc_crypto_probe(struct platform_device *pdev) +{ + int rc; + + rc = spacc_probe(pdev, snps_spacc_id); + if (rc < 0) + goto err; + + spacc_pdev[0] = pdev; + +#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_HASH) + rc = probe_hashes(pdev); + if (rc < 0) + goto err; +#endif + +#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_CIPHER) + rc = probe_ciphers(pdev); + if (rc < 0) + goto err; +#endif + +#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AEAD) + rc = probe_aeads(pdev); + if (rc < 0) + goto err; +#endif + + return 0; +err: + spacc_unregister_algs(); + + return rc; +} + +static void spacc_crypto_remove(struct platform_device *pdev) +{ + spacc_unregister_algs(); + spacc_remove(pdev); +} + +static struct platform_driver spacc_driver = { + .probe = spacc_crypto_probe, + .remove = spacc_crypto_remove, + .driver = { + .name = "spacc", + .of_match_table = of_match_ptr(snps_spacc_id), + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(spacc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Synopsys, Inc."); +MODULE_DESCRIPTION("SPAcc Crypto Accelerator Driver"); diff --git a/drivers/crypto/dwc-spacc/spacc_device.h b/drivers/crypto/dwc-spacc/spacc_device.h new file mode 100644 index 000000000000..be7fde25046b --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_device.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: GPL-2.0 
*/ + +#ifndef SPACC_DEVICE_H_ +#define SPACC_DEVICE_H_ + +#include +#include +#include +#include +#include "spacc_core.h" + +#define MODE_TAB_AEAD(_name, _ciph, _hash, _hashlen, _ivlen, _blocklen) \ + .name = _name, .aead = { .ciph = _ciph, .hash = _hash }, \ + .hashlen = _hashlen, .ivlen = _ivlen, .blocklen = _blocklen + +/* Helper macros for initializing the hash/cipher tables. */ +#define MODE_TAB_COMMON(_name, _id_name, _blocklen) \ + .name = _name, .id = CRYPTO_MODE_##_id_name, .blocklen = _blocklen + +#define MODE_TAB_HASH(_name, _id_name, _hashlen, _blocklen) \ + MODE_TAB_COMMON(_name, _id_name, _blocklen), \ + .hashlen = _hashlen, .testlen = _hashlen + +#define MODE_TAB_CIPH(_name, _id_name, _ivlen, _blocklen) \ + MODE_TAB_COMMON(_name, _id_name, _blocklen), \ + .ivlen = _ivlen + +#define MODE_TAB_HASH_XCBC 0x8000 + +#define SPACC_MAX_DIGEST_SIZE 64 +#define SPACC_MAX_KEY_SIZE 32 +#define SPACC_MAX_IV_SIZE 16 + +#define SPACC_DMA_ALIGN 4 +#define SPACC_DMA_BOUNDARY 0x10000 + +#define MAX_DEVICES 2 +/* flag means the IV is computed from setkey and crypt*/ +#define SPACC_MANGLE_IV_FLAG 0x8000 + +/* we're doing a CTR mangle (for RFC3686/IPsec)*/ +#define SPACC_MANGLE_IV_RFC3686 0x0100 + +/* we're doing GCM */ +#define SPACC_MANGLE_IV_RFC4106 0x0200 + +/* we're doing GMAC */ +#define SPACC_MANGLE_IV_RFC4543 0x0300 + +/* we're doing CCM */ +#define SPACC_MANGLE_IV_RFC4309 0x0400 + +/* we're doing SM4 GCM/CCM */ +#define SPACC_MANGLE_IV_RFC8998 0x0500 + +#define CRYPTO_MODE_AES_CTR_RFC3686 (CRYPTO_MODE_AES_CTR \ + | SPACC_MANGLE_IV_FLAG \ + | SPACC_MANGLE_IV_RFC3686) +#define CRYPTO_MODE_AES_GCM_RFC4106 (CRYPTO_MODE_AES_GCM \ + | SPACC_MANGLE_IV_FLAG \ + | SPACC_MANGLE_IV_RFC4106) +#define CRYPTO_MODE_AES_GCM_RFC4543 (CRYPTO_MODE_AES_GCM \ + | SPACC_MANGLE_IV_FLAG \ + | SPACC_MANGLE_IV_RFC4543) +#define CRYPTO_MODE_AES_CCM_RFC4309 (CRYPTO_MODE_AES_CCM \ + | SPACC_MANGLE_IV_FLAG \ + | SPACC_MANGLE_IV_RFC4309) +#define CRYPTO_MODE_SM4_GCM_RFC8998 
(CRYPTO_MODE_SM4_GCM) +#define CRYPTO_MODE_SM4_CCM_RFC8998 (CRYPTO_MODE_SM4_CCM) + +struct spacc_crypto_ctx { + struct device *dev; + + spinlock_t lock; + struct list_head jobs; + int handle, mode, auth_size, key_len; + unsigned char *cipher_key; + + /* + * Indicates that the H/W context has been setup and can be used for + * crypto; otherwise, the software fallback will be used. + */ + bool ctx_valid; + unsigned int flag_ppp; + + /* salt used for rfc3686/givencrypt mode */ + unsigned char csalt[16]; + u8 ipad[128] __aligned(sizeof(u32)); + u8 digest_ctx_buf[128] __aligned(sizeof(u32)); + u8 tmp_buffer[128] __aligned(sizeof(u32)); + + /* Save keylen from setkey */ + int keylen; + u8 key[256]; + int zero_key; + unsigned char *tmp_sgl_buff; + struct scatterlist *tmp_sgl; + + union{ + struct crypto_ahash *hash; + struct crypto_aead *aead; + struct crypto_skcipher *cipher; + } fb; +}; + +struct spacc_crypto_reqctx { + struct pdu_ddt src, dst; + void *digest_buf, *iv_buf; + dma_addr_t digest_dma; + int dst_nents, src_nents, aead_nents, total_nents; + int encrypt_op, mode, single_shot; + unsigned int spacc_cipher_cryptlen, rem_nents; + + struct aead_cb_data { + int new_handle; + struct spacc_crypto_ctx *tctx; + struct spacc_crypto_reqctx *ctx; + struct aead_request *req; + struct spacc_device *spacc; + } cb; + + struct ahash_cb_data { + int new_handle; + struct spacc_crypto_ctx *tctx; + struct spacc_crypto_reqctx *ctx; + struct ahash_request *req; + struct spacc_device *spacc; + } acb; + + struct cipher_cb_data { + int new_handle; + struct spacc_crypto_ctx *tctx; + struct spacc_crypto_reqctx *ctx; + struct skcipher_request *req; + struct spacc_device *spacc; + } ccb; + + union { + struct ahash_request hash_req; + struct skcipher_request cipher_req; + struct aead_request aead_req; + } fb; +}; + +struct mode_tab { + char name[128]; + + int valid; + + /* mode ID used in hash/cipher mode but not aead*/ + int id; + + /* ciph/hash mode used in aead */ + struct { + int ciph, 
hash; + } aead; + + unsigned int hashlen, ivlen, blocklen, keylen[3]; + unsigned int keylen_mask, testlen; + unsigned int chunksize, walksize, min_keysize, max_keysize; + + bool sw_fb; + + union { + unsigned char hash_test[SPACC_MAX_DIGEST_SIZE]; + unsigned char ciph_test[3][2 * SPACC_MAX_IV_SIZE]; + }; +}; + +struct spacc_alg { + struct mode_tab *mode; + unsigned int keylen_mask; + + struct device *dev[MAX_DEVICES]; + + struct list_head list; + struct crypto_alg *calg; + struct crypto_tfm *tfm; + + union { + struct ahash_alg hash; + struct aead_alg aead; + struct skcipher_alg skcipher; + } alg; +}; + +static inline const struct spacc_alg *spacc_tfm_ahash(struct crypto_tfm *tfm) +{ + const struct crypto_alg *calg = tfm->__crt_alg; + + if ((calg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) + return container_of(calg, struct spacc_alg, alg.hash.halg.base); + + return NULL; +} + +static inline const struct spacc_alg *spacc_tfm_skcipher(struct crypto_tfm *tfm) +{ + const struct crypto_alg *calg = tfm->__crt_alg; + + if ((calg->cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_SKCIPHER) + return container_of(calg, struct spacc_alg, alg.skcipher.base); + + return NULL; +} + +static inline const struct spacc_alg *spacc_tfm_aead(struct crypto_tfm *tfm) +{ + const struct crypto_alg *calg = tfm->__crt_alg; + + if ((calg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AEAD) + return container_of(calg, struct spacc_alg, alg.aead.base); + + return NULL; +} + +int probe_hashes(struct platform_device *spacc_pdev); +int spacc_unregister_hash_algs(void); + +int probe_aeads(struct platform_device *spacc_pdev); +int spacc_unregister_aead_algs(void); + +int probe_ciphers(struct platform_device *spacc_pdev); +int spacc_unregister_cipher_algs(void); + +int spacc_probe(struct platform_device *pdev, + const struct of_device_id snps_spacc_id[]); + +irqreturn_t spacc_irq_handler(int irq, void *dev); +#endif diff --git a/drivers/crypto/dwc-spacc/spacc_hal.c 
b/drivers/crypto/dwc-spacc/spacc_hal.c new file mode 100644 index 000000000000..0d460c4df542 --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_hal.c @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "spacc_hal.h" + +static struct dma_pool *ddt_pool, *ddt16_pool, *ddt4_pool; +static struct device *ddt_device; + +#define PDU_REG_SPACC_VERSION 0x00180UL +#define PDU_REG_SPACC_CONFIG 0x00184UL +#define PDU_REG_SPACC_CONFIG2 0x00190UL +#define PDU_REG_SPACC_IV_OFFSET 0x00040UL +#define PDU_REG_PDU_CONFIG 0x00188UL +#define PDU_REG_SECURE_LOCK 0x001C0UL + +int pdu_get_version(void __iomem *dev, struct pdu_info *inf) +{ + unsigned long tmp; + + if (!inf) + return -1; + + memset(inf, 0, sizeof(*inf)); + tmp = readl(dev + PDU_REG_SPACC_VERSION); + + /* Read the SPAcc version block this tells us the revision, + * project, and a few other feature bits + * + * layout for v6.5+ + */ + inf->spacc_version = (struct spacc_version_block) { + .minor = SPACC_ID_MINOR(tmp), + .major = SPACC_ID_MAJOR(tmp), + .version = (SPACC_ID_MAJOR(tmp) << 4) | SPACC_ID_MINOR(tmp), + .qos = SPACC_ID_QOS(tmp), + .is_spacc = SPACC_ID_TYPE(tmp) == SPACC_TYPE_SPACCQOS, + .is_pdu = SPACC_ID_TYPE(tmp) == SPACC_TYPE_PDU, + .aux = SPACC_ID_AUX(tmp), + .vspacc_idx = SPACC_ID_VIDX(tmp), + .partial = SPACC_ID_PARTIAL(tmp), + .project = SPACC_ID_PROJECT(tmp), + }; + + /* try to autodetect */ + writel(0x80000000, dev + PDU_REG_SPACC_IV_OFFSET); + + if (readl(dev + PDU_REG_SPACC_IV_OFFSET) == 0x80000000) + inf->spacc_version.ivimport = 1; + else + inf->spacc_version.ivimport = 0; + + + /* Read the SPAcc config block (v6.5+) which tells us how many + * contexts there are and context page sizes + * this register is only available in v6.5 and up + */ + tmp = readl(dev + PDU_REG_SPACC_CONFIG); + inf->spacc_config = (struct spacc_config_block) { + SPACC_CFG_CTX_CNT(tmp), + SPACC_CFG_VSPACC_CNT(tmp), + SPACC_CFG_CIPH_CTX_SZ(tmp), + SPACC_CFG_HASH_CTX_SZ(tmp), + 
SPACC_CFG_DMA_TYPE(tmp), + 0, 0, 0, 0 + }; + + /* CONFIG2 only present in v6.5+ cores */ + tmp = readl(dev + PDU_REG_SPACC_CONFIG2); + if (inf->spacc_version.qos) { + inf->spacc_config.cmd0_fifo_depth = + SPACC_CFG_CMD0_FIFO_QOS(tmp); + inf->spacc_config.cmd1_fifo_depth = + SPACC_CFG_CMD1_FIFO(tmp); + inf->spacc_config.cmd2_fifo_depth = + SPACC_CFG_CMD2_FIFO(tmp); + inf->spacc_config.stat_fifo_depth = + SPACC_CFG_STAT_FIFO_QOS(tmp); + } else { + inf->spacc_config.cmd0_fifo_depth = + SPACC_CFG_CMD0_FIFO(tmp); + inf->spacc_config.stat_fifo_depth = + SPACC_CFG_STAT_FIFO(tmp); + } + + /* only read PDU config if it's actually a PDU engine */ + if (inf->spacc_version.is_pdu) { + tmp = readl(dev + PDU_REG_PDU_CONFIG); + inf->pdu_config = (struct pdu_config_block) + {SPACC_PDU_CFG_MINOR(tmp), + SPACC_PDU_CFG_MAJOR(tmp)}; + + /* unlock all cores by default */ + writel(0, dev + PDU_REG_SECURE_LOCK); + } + + return 0; +} + +void pdu_to_dev(void __iomem *addr_, uint32_t *src, unsigned long nword) +{ + void __iomem *addr = addr_; + + while (nword--) { + writel(*src++, addr); + addr += 4; + } +} + +void pdu_from_dev(u32 *dst, void __iomem *addr_, unsigned long nword) +{ + void __iomem *addr = addr_; + + while (nword--) { + *dst++ = readl(addr); + addr += 4; + } +} + +static void pdu_to_dev_big(void __iomem *addr_, const unsigned char *src, + unsigned long nword) +{ + unsigned long v; + void __iomem *addr = addr_; + + while (nword--) { + v = 0; + v = (v << 8) | ((unsigned long)*src++); + v = (v << 8) | ((unsigned long)*src++); + v = (v << 8) | ((unsigned long)*src++); + v = (v << 8) | ((unsigned long)*src++); + writel(v, addr); + addr += 4; + } +} + +static void pdu_from_dev_big(unsigned char *dst, void __iomem *addr_, + unsigned long nword) +{ + unsigned long v; + void __iomem *addr = addr_; + + while (nword--) { + v = readl(addr); + addr += 4; + *dst++ = (v >> 24) & 0xFF; v <<= 8; + *dst++ = (v >> 24) & 0xFF; v <<= 8; + *dst++ = (v >> 24) & 0xFF; v <<= 8; + *dst++ = (v >> 24) & 
0xFF; v <<= 8; + } +} + +static void pdu_to_dev_little(void __iomem *addr_, const unsigned char *src, + unsigned long nword) +{ + unsigned long v; + void __iomem *addr = addr_; + + while (nword--) { + v = 0; + v = (v >> 8) | ((unsigned long)*src++ << 24UL); + v = (v >> 8) | ((unsigned long)*src++ << 24UL); + v = (v >> 8) | ((unsigned long)*src++ << 24UL); + v = (v >> 8) | ((unsigned long)*src++ << 24UL); + writel(v, addr); + addr += 4; + } +} + +static void pdu_from_dev_little(unsigned char *dst, void __iomem *addr_, + unsigned long nword) +{ + unsigned long v; + void __iomem *addr = addr_; + + while (nword--) { + v = readl(addr); + addr += 4; + *dst++ = v & 0xFF; v >>= 8; + *dst++ = v & 0xFF; v >>= 8; + *dst++ = v & 0xFF; v >>= 8; + *dst++ = v & 0xFF; v >>= 8; + } +} + +void pdu_to_dev_s(void __iomem *addr, const unsigned char *src, + unsigned long nword, int endian) +{ + if (endian) + pdu_to_dev_big(addr, src, nword); + else + pdu_to_dev_little(addr, src, nword); +} + +void pdu_from_dev_s(unsigned char *dst, void __iomem *addr, + unsigned long nword, int endian) +{ + if (endian) + pdu_from_dev_big(dst, addr, nword); + else + pdu_from_dev_little(dst, addr, nword); +} + +void pdu_io_cached_write(void __iomem *addr, unsigned long val, + uint32_t *cache) +{ + if (*cache == val) { +#ifdef CONFIG_CRYPTO_DEV_SPACC_DEBUG_TRACE_IO + pr_debug("PDU: write %.8lx -> %p (cached)\n", val, addr); +#endif + return; + } + + *cache = val; + writel(val, addr); +} + +struct device *get_ddt_device(void) +{ + return ddt_device; +} + +/* Platform specific DDT routines */ + +/* create a DMA pool for DDT entries this should help from splitting + * pages for DDTs which by default are 520 bytes long meaning we would + * otherwise waste 3576 bytes per DDT allocated... + * we also maintain a smaller table of 4 entries common for simple jobs + * which uses 480 fewer bytes of DMA memory. 
+ * and for good measure another table for 16 entries saving 384 bytes + */ +int pdu_mem_init(void *device) +{ + if (ddt_device) + return 0; /* Already setup */ + + ddt_device = device; + ddt_pool = dma_pool_create("spaccddt", device, (PDU_MAX_DDT + 1) * 8, + 8, 0); /* max of 64 DDT entries */ + + if (!ddt_pool) + return -1; + +#if PDU_MAX_DDT > 16 + /* max of 16 DDT entries */ + ddt16_pool = dma_pool_create("spaccddt16", device, (16 + 1) * 8, 8, 0); + if (!ddt16_pool) { + dma_pool_destroy(ddt_pool); + return -1; + } +#else + ddt16_pool = ddt_pool; +#endif + /* max of 4 DDT entries */ + ddt4_pool = dma_pool_create("spaccddt4", device, (4 + 1) * 8, 8, 0); + if (!ddt4_pool) { + dma_pool_destroy(ddt_pool); +#if PDU_MAX_DDT > 16 + dma_pool_destroy(ddt16_pool); +#endif + return -1; + } + + return 0; +} + +/* destroy the pool */ +void pdu_mem_deinit(void *device) +{ + /* For now, just skip deinit except for matching device */ + if (device != ddt_device) + return; + + dma_pool_destroy(ddt_pool); + +#if PDU_MAX_DDT > 16 + dma_pool_destroy(ddt16_pool); +#endif + dma_pool_destroy(ddt4_pool); + + ddt_device = NULL; +} + +int pdu_ddt_init(struct pdu_ddt *ddt, unsigned long limit) +{ + /* set the MSB if we want to use an ATOMIC + * allocation required for top half processing + */ + int flag = (limit & 0x80000000); + + limit &= 0x7FFFFFFF; + if (limit + 1 >= SIZE_MAX / 8) { + /* Too big to even compute DDT size */ + return -1; + } else if (limit > PDU_MAX_DDT) { + size_t len = 8 * ((size_t)limit + 1); + + ddt->virt = dma_alloc_coherent(ddt_device, len, &ddt->phys, + flag ? GFP_ATOMIC : GFP_KERNEL); + } else if (limit > 16) { + ddt->virt = dma_pool_alloc(ddt_pool, flag ? GFP_ATOMIC : + GFP_KERNEL, &ddt->phys); + } else if (limit > 4) { + ddt->virt = dma_pool_alloc(ddt16_pool, flag ? GFP_ATOMIC : + GFP_KERNEL, &ddt->phys); + } else { + ddt->virt = dma_pool_alloc(ddt4_pool, flag ? 
GFP_ATOMIC : + GFP_KERNEL, &ddt->phys); + } + + ddt->idx = 0; + ddt->len = 0; + ddt->limit = limit; + + if (!ddt->virt) + return -1; + +#ifdef CONFIG_CRYPTO_DEV_SPACC_DEBUG_TRACE_DDT + pr_debug(" DDT[%.8lx]: allocated %lu fragments\n", + (unsigned long)ddt->phys, limit); +#endif + + return 0; +} + +int pdu_ddt_add(struct pdu_ddt *ddt, dma_addr_t phys, unsigned long size) +{ +#ifdef CONFIG_CRYPTO_DEV_SPACC_DEBUG_TRACE_DDT + pr_debug(" DDT[%.8lx]: 0x%.8lx size %lu\n", + (unsigned long)ddt->phys, + (unsigned long)phys, size); +#endif + + if (ddt->idx == ddt->limit) + return -1; + + ddt->virt[ddt->idx * 2 + 0] = (uint32_t)phys; + ddt->virt[ddt->idx * 2 + 1] = size; + ddt->virt[ddt->idx * 2 + 2] = 0; + ddt->virt[ddt->idx * 2 + 3] = 0; + ddt->len += size; + ++(ddt->idx); + + return 0; +} + +int pdu_ddt_free(struct pdu_ddt *ddt) +{ + if (ddt->virt) { + if (ddt->limit > PDU_MAX_DDT) { + size_t len = 8 * ((size_t)ddt->limit + 1); + + dma_free_coherent(ddt_device, len, ddt->virt, + ddt->phys); + } else if (ddt->limit > 16) { + dma_pool_free(ddt_pool, ddt->virt, ddt->phys); + } else if (ddt->limit > 4) { + dma_pool_free(ddt16_pool, ddt->virt, ddt->phys); + } else { + dma_pool_free(ddt4_pool, ddt->virt, ddt->phys); + } + + ddt->virt = NULL; + } + + return 0; +} diff --git a/drivers/crypto/dwc-spacc/spacc_hal.h b/drivers/crypto/dwc-spacc/spacc_hal.h new file mode 100644 index 000000000000..8b817f993f3d --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_hal.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef SPACC_HAL_H +#define SPACC_HAL_H + +/* Maximum number of DDT entries allowed*/ +#ifndef PDU_MAX_DDT +#define PDU_MAX_DDT 64 +#endif + +/* Platform Generic */ +#define PDU_IRQ_EN_GLBL BIT(31) +#define PDU_IRQ_EN_VSPACC(x) (1UL << (x)) +#define PDU_IRQ_EN_RNG BIT(16) + +#ifndef SPACC_ID_MINOR + #define SPACC_ID_MINOR(x) ((x) & 0x0F) + #define SPACC_ID_MAJOR(x) (((x) >> 4) & 0x0F) + #define SPACC_ID_QOS(x) (((x) >> 8) & 0x01) + #define SPACC_ID_TYPE(x) (((x) 
>> 9) & 0x03) + #define SPACC_ID_AUX(x) (((x) >> 11) & 0x01) + #define SPACC_ID_VIDX(x) (((x) >> 12) & 0x07) + #define SPACC_ID_PARTIAL(x) (((x) >> 15) & 0x01) + #define SPACC_ID_PROJECT(x) ((x) >> 16) + + #define SPACC_TYPE_SPACCQOS 0 + #define SPACC_TYPE_PDU 1 + + #define SPACC_CFG_CTX_CNT(x) ((x) & 0x7F) + #define SPACC_CFG_RC4_CTX_CNT(x) (((x) >> 8) & 0x7F) + #define SPACC_CFG_VSPACC_CNT(x) (((x) >> 16) & 0x0F) + #define SPACC_CFG_CIPH_CTX_SZ(x) (((x) >> 20) & 0x07) + #define SPACC_CFG_HASH_CTX_SZ(x) (((x) >> 24) & 0x0F) + #define SPACC_CFG_DMA_TYPE(x) (((x) >> 28) & 0x03) + + #define SPACC_CFG_CMD0_FIFO_QOS(x) (((x) >> 0) & 0x7F) + #define SPACC_CFG_CMD0_FIFO(x) (((x) >> 0) & 0x1FF) + #define SPACC_CFG_CMD1_FIFO(x) (((x) >> 8) & 0x7F) + #define SPACC_CFG_CMD2_FIFO(x) (((x) >> 16) & 0x7F) + #define SPACC_CFG_STAT_FIFO_QOS(x) (((x) >> 24) & 0x7F) + #define SPACC_CFG_STAT_FIFO(x) (((x) >> 16) & 0x1FF) + + #define SPACC_PDU_CFG_MINOR(x) ((x) & 0x0F) + #define SPACC_PDU_CFG_MAJOR(x) (((x) >> 4) & 0x0F) + + #define PDU_SECURE_LOCK_SPACC(x) (x) + #define PDU_SECURE_LOCK_CFG BIT(30) + #define PDU_SECURE_LOCK_GLBL BIT(31) +#endif /* SPACC_ID_MINOR */ + +#define CRYPTO_OK (0) + +struct spacc_version_block { + unsigned int minor, + major, + version, + qos, + is_spacc, + is_pdu, + aux, + vspacc_idx, + partial, + project, + ivimport; +}; + +struct spacc_config_block { + unsigned int num_ctx, + num_vspacc, + ciph_ctx_page_size, + hash_ctx_page_size, + dma_type, + cmd0_fifo_depth, + cmd1_fifo_depth, + cmd2_fifo_depth, + stat_fifo_depth; +}; + +struct pdu_config_block { + unsigned int minor, + major; +}; + +struct pdu_info { + u32 clockrate; + struct spacc_version_block spacc_version; + struct spacc_config_block spacc_config; + struct pdu_config_block pdu_config; +}; + +struct pdu_ddt { + dma_addr_t phys; + u32 *virt; + u32 *virt_orig; + unsigned long idx, limit, len; +}; + +void pdu_io_cached_write(void __iomem *addr, unsigned long val, + uint32_t *cache); +void 
pdu_to_dev(void __iomem *addr, uint32_t *src, unsigned long nword); +void pdu_from_dev(u32 *dst, void __iomem *addr, unsigned long nword); +void pdu_from_dev_s(unsigned char *dst, void __iomem *addr, unsigned long nword, + int endian); +void pdu_to_dev_s(void __iomem *addr, const unsigned char *src, + unsigned long nword, int endian); +struct device *get_ddt_device(void); +int pdu_mem_init(void *device); +void pdu_mem_deinit(void *device); +int pdu_ddt_init(struct pdu_ddt *ddt, unsigned long limit); +int pdu_ddt_add(struct pdu_ddt *ddt, dma_addr_t phys, unsigned long size); +int pdu_ddt_free(struct pdu_ddt *ddt); +int pdu_get_version(void __iomem *dev, struct pdu_info *inf); + +#endif diff --git a/drivers/crypto/dwc-spacc/spacc_interrupt.c b/drivers/crypto/dwc-spacc/spacc_interrupt.c new file mode 100644 index 000000000000..176b3d6be25d --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_interrupt.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "spacc_core.h" + +/* Read the IRQ status register and process as needed */ + + +void spacc_disable_int (struct spacc_device *spacc); + +static inline uint32_t _spacc_get_stat_cnt(struct spacc_device *spacc) +{ + u32 fifo; + + if (spacc->config.is_qos) + fifo = SPACC_FIFO_STAT_STAT_CNT_GET_QOS(readl(spacc->regmap + + SPACC_REG_FIFO_STAT)); + else + fifo = SPACC_FIFO_STAT_STAT_CNT_GET(readl(spacc->regmap + + SPACC_REG_FIFO_STAT)); + + return fifo; +} + +static int spacc_pop_packets_ex(struct spacc_device *spacc, int *num_popped, + unsigned long *lock_flag) +{ + int jobs; + int ret = -EINPROGRESS; + struct spacc_job *job = NULL; + u32 cmdstat, swid, spacc_errcode = SPACC_OK; + + *num_popped = 0; + + while ((jobs = _spacc_get_stat_cnt(spacc))) { + while (jobs-- > 0) { + /* write the pop register to get the next job */ + writel(1, spacc->regmap + SPACC_REG_STAT_POP); + cmdstat = readl(spacc->regmap + SPACC_REG_STATUS); + + swid = SPACC_STATUS_SW_ID_GET(cmdstat); + + if 
(swid >= SPACC_MAX_JOBS) { + ret = -EIO; + goto ERR; + } + + /* find the associated job with popped swid */ + if (spacc->job_lookup[swid] == SPACC_JOB_IDX_UNUSED) + job = NULL; + else + job = &spacc->job[spacc->job_lookup[swid]]; + + if (!job) { + ret = -EIO; + goto ERR; + } + + /* mark job as done */ + job->job_done = 1; + spacc->job_lookup[swid] = SPACC_JOB_IDX_UNUSED; + spacc_errcode = SPACC_GET_STATUS_RET_CODE(cmdstat); + + switch (spacc_errcode) { + case SPACC_ICVFAIL: + ret = -EBADMSG; + break; + case SPACC_MEMERR: + ret = -EINVAL; + break; + case SPACC_BLOCKERR: + ret = -EINVAL; + break; + case SPACC_SECERR: + ret = -EIO; + break; + case SPACC_OK: + ret = CRYPTO_OK; + break; + default: + pr_debug("Invalid SPAcc Error"); + } + + job->job_err = ret; + + /* + * We're done touching the SPAcc hw, so release the + * lock across the job callback. It must be reacquired + * before continuing to the next iteration. + */ + + if (job->cb) { + spin_unlock_irqrestore(&spacc->lock, + *lock_flag); + job->cb(spacc, job->cbdata); + spin_lock_irqsave(&spacc->lock, + *lock_flag); + } + + (*num_popped)++; + } + } + + if (!*num_popped) + pr_debug(" Failed to pop a single job\n"); + +ERR: + spacc_process_jb(spacc); + + /* reset the WD timer to the original value*/ + if (spacc->op_mode == SPACC_OP_MODE_WD) + spacc_set_wd_count(spacc, spacc->config.wd_timer); + + if (*num_popped && spacc->spacc_notify_jobs) + spacc->spacc_notify_jobs(spacc); + + return ret; +} + +int spacc_pop_packets(struct spacc_device *spacc, int *num_popped) +{ + int err; + unsigned long lock_flag; + + spin_lock_irqsave(&spacc->lock, lock_flag); + err = spacc_pop_packets_ex(spacc, num_popped, &lock_flag); + spin_unlock_irqrestore(&spacc->lock, lock_flag); + + return err; +} + +uint32_t spacc_process_irq(struct spacc_device *spacc) +{ + u32 temp; + int x, cmd_max; + unsigned long lock_flag; + + spin_lock_irqsave(&spacc->lock, lock_flag); + + temp = readl(spacc->regmap + SPACC_REG_IRQ_STAT); + + /* 
clear interrupt pin and run registered callback */ + if (temp & SPACC_IRQ_STAT_STAT) { + SPACC_IRQ_STAT_CLEAR_STAT(spacc); + if (spacc->op_mode == SPACC_OP_MODE_IRQ) { + spacc->config.fifo_cnt <<= 2; + if (spacc->config.fifo_cnt >= + spacc->config.stat_fifo_depth) + spacc->config.fifo_cnt = + spacc->config.stat_fifo_depth; + + /* update fifo count to allow more stati to pile up*/ + spacc_irq_stat_enable(spacc, spacc->config.fifo_cnt); + /* reenable CMD0 empty interrupt*/ + spacc_irq_cmdx_enable(spacc, 0, 0); + } + + if (spacc->irq_cb_stat) + spacc->irq_cb_stat(spacc); + } + + /* Watchdog IRQ */ + if (spacc->op_mode == SPACC_OP_MODE_WD) { + if (temp & SPACC_IRQ_STAT_STAT_WD) { + if (++spacc->wdcnt == SPACC_WD_LIMIT) { + /* this happens when you get too many IRQs that + * go unanswered + */ + spacc_irq_stat_wd_disable(spacc); + /* we set the STAT CNT to 1 so that every job + * generates an IRQ now + */ + spacc_irq_stat_enable(spacc, 1); + spacc->op_mode = SPACC_OP_MODE_IRQ; + } else if (spacc->config.wd_timer < (0xFFFFFFUL >> 4)) { + /* if the timer isn't too high lets bump it up + * a bit so as to give the IRQ a chance to + * reply + */ + spacc_set_wd_count(spacc, + spacc->config.wd_timer << 4); + } + + SPACC_IRQ_STAT_CLEAR_STAT_WD(spacc); + if (spacc->irq_cb_stat_wd) + spacc->irq_cb_stat_wd(spacc); + } + } + + if (spacc->op_mode == SPACC_OP_MODE_IRQ) { + cmd_max = (spacc->config.is_qos ? 
SPACC_CMDX_MAX_QOS : + SPACC_CMDX_MAX); + for (x = 0; x < cmd_max; x++) { + if (temp & SPACC_IRQ_STAT_CMDX(x)) { + spacc->config.fifo_cnt = 1; + /* disable CMD0 interrupt since STAT=1 */ + spacc_irq_cmdx_disable(spacc, x); + spacc_irq_stat_enable(spacc, + spacc->config.fifo_cnt); + + SPACC_IRQ_STAT_CLEAR_CMDX(spacc, x); + /* run registered callback */ + if (spacc->irq_cb_cmdx) + spacc->irq_cb_cmdx(spacc, x); + } + } + } + + spin_unlock_irqrestore(&spacc->lock, lock_flag); + + return temp; +} + +void spacc_set_wd_count(struct spacc_device *spacc, uint32_t val) +{ + writel(val, spacc->regmap + SPACC_REG_STAT_WD_CTRL); +} + +/* cmdx and cmdx_cnt depend on HW config + * cmdx can be 0, 1 or 2 + * cmdx_cnt must be 2^6 or less + */ +void spacc_irq_cmdx_enable(struct spacc_device *spacc, int cmdx, int cmdx_cnt) +{ + u32 temp; + + /* read the reg, clear the bit range and set the new value */ + temp = readl(spacc->regmap + SPACC_REG_IRQ_CTRL) & + (~SPACC_IRQ_CTRL_CMDX_CNT_MASK(cmdx)); + temp |= SPACC_IRQ_CTRL_CMDX_CNT_SET(cmdx, cmdx_cnt); + + writel(temp | SPACC_IRQ_CTRL_CMDX_CNT_SET(cmdx, cmdx_cnt), + spacc->regmap + SPACC_REG_IRQ_CTRL); + + writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_CMD(cmdx), + spacc->regmap + SPACC_REG_IRQ_EN); +} + +void spacc_irq_cmdx_disable(struct spacc_device *spacc, int cmdx) +{ + writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) & + (~SPACC_IRQ_EN_CMD(cmdx)), spacc->regmap + SPACC_REG_IRQ_EN); +} + +void spacc_irq_stat_enable(struct spacc_device *spacc, int stat_cnt) +{ + u32 temp; + + temp = readl(spacc->regmap + SPACC_REG_IRQ_CTRL); + if (spacc->config.is_qos) { + temp &= (~SPACC_IRQ_CTRL_STAT_CNT_MASK_QOS); + temp |= SPACC_IRQ_CTRL_STAT_CNT_SET_QOS(stat_cnt); + } else { + temp &= (~SPACC_IRQ_CTRL_STAT_CNT_MASK); + temp |= SPACC_IRQ_CTRL_STAT_CNT_SET(stat_cnt); + } + + writel(temp, spacc->regmap + SPACC_REG_IRQ_CTRL); + writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_STAT, + spacc->regmap + SPACC_REG_IRQ_EN); +} + 
+void spacc_irq_stat_disable(struct spacc_device *spacc) +{ + writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) & (~SPACC_IRQ_EN_STAT), + spacc->regmap + SPACC_REG_IRQ_EN); +} + +void spacc_irq_stat_wd_enable(struct spacc_device *spacc) +{ + writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_STAT_WD, + spacc->regmap + SPACC_REG_IRQ_EN); +} + +void spacc_irq_stat_wd_disable(struct spacc_device *spacc) +{ + writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) & + (~SPACC_IRQ_EN_STAT_WD), spacc->regmap + SPACC_REG_IRQ_EN); +} + +void spacc_irq_glbl_enable(struct spacc_device *spacc) +{ + writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_GLBL, + spacc->regmap + SPACC_REG_IRQ_EN); +} + +void spacc_irq_glbl_disable(struct spacc_device *spacc) +{ + writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) & (~SPACC_IRQ_EN_GLBL), + spacc->regmap + SPACC_REG_IRQ_EN); +} + +void spacc_disable_int (struct spacc_device *spacc) +{ + writel(0, spacc->regmap + SPACC_REG_IRQ_EN); +} + +/* a function to run callbacks in the IRQ handler */ +irqreturn_t spacc_irq_handler(int irq, void *dev) +{ + struct spacc_priv *priv = platform_get_drvdata(to_platform_device(dev)); + struct spacc_device *spacc = &priv->spacc; + + if (spacc->config.oldtimer != spacc->config.timer) { + priv->spacc.config.wd_timer = spacc->config.timer; + spacc_set_wd_count(&priv->spacc, priv->spacc.config.wd_timer); + spacc->config.oldtimer = spacc->config.timer; + } + + /* check irq flags and process as required */ + if (!spacc_process_irq(spacc)) + return IRQ_NONE; + + return IRQ_HANDLED; +} diff --git a/drivers/crypto/dwc-spacc/spacc_manager.c b/drivers/crypto/dwc-spacc/spacc_manager.c new file mode 100644 index 000000000000..3b26b27a998f --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_manager.c @@ -0,0 +1,650 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "spacc_core.h" + +#define MIN(x, y) (((x) < (y)) ? 
(x) : (y)) + +/* prevent reading past the end of the buffer */ +static void read_from_buf(unsigned char *dst, unsigned char *src, + int off, int n, int max) +{ + if (!dst) + return; + + while (off < max && n) { + *dst++ = src[off++]; + --n; + } +} + +static void write_to_buf(unsigned char *dst, const unsigned char *src, + int off, int n, int len) +{ + if (!src) + return; + + while (n && (off < len)) { + dst[off++] = *src++; + --n; + } +} + +/* This function is not meant to be called directly, + * it should be called from the job manager + */ +static int spacc_ctx_request(struct spacc_device *spacc, + int ctx_id, int ncontig) +{ + int ret; + int x, y, count; + unsigned long lock_flag; + + if (!spacc) + return -1; + + if (ctx_id > spacc->config.num_ctx) + return -1; + + if (ncontig < 1 || ncontig > spacc->config.num_ctx) + return -1; + + ret = CRYPTO_OK; + + spin_lock_irqsave(&spacc->ctx_lock, lock_flag); + /* allocating scheme, look for contiguous contexts. Free contexts have + * a ref_cnt of 0. 
+ * If specific ctx_id is requested, + * test the ncontig and then bump the ref_cnt + */ + if (ctx_id != -1) { + if ((&spacc->ctx[ctx_id])->ncontig != ncontig - 1) + ret = -1; + } else { + /* check to see if ncontig are free + * loop over all available contexts to find the first + * ncontig empty ones + */ + for (x = 0; x <= (spacc->config.num_ctx - ncontig); ) { + count = ncontig; + while (count) { + if ((&spacc->ctx[x + count - 1])->ref_cnt != 0) { + /* incr x to past failed count + * location + */ + x = x + count; + break; + } + count--; + } + if (count != 0) { + ret = -1; + /* test next x */ + } else { + ctx_id = x; + ret = CRYPTO_OK; + break; + } + } + } + + if (ret == CRYPTO_OK) { + /* ctx_id is good so mark used */ + for (y = 0; y < ncontig; y++) + (&spacc->ctx[ctx_id + y])->ref_cnt++; + (&spacc->ctx[ctx_id])->ncontig = ncontig - 1; + } else { + ctx_id = -1; + } + + spin_unlock_irqrestore(&spacc->ctx_lock, lock_flag); + + return ctx_id; +} + +static int spacc_ctx_release(struct spacc_device *spacc, int ctx_id) +{ + int y; + int ncontig; + unsigned long lock_flag; + + if (ctx_id < 0 || ctx_id > spacc->config.num_ctx) + return -EINVAL; + + spin_lock_irqsave(&spacc->ctx_lock, lock_flag); + /* release the base context and contiguous block */ + ncontig = (&spacc->ctx[ctx_id])->ncontig; + for (y = 0; y <= ncontig; y++) { + if ((&spacc->ctx[ctx_id + y])->ref_cnt > 0) + (&spacc->ctx[ctx_id + y])->ref_cnt--; + } + + if ((&spacc->ctx[ctx_id])->ref_cnt == 0) { + (&spacc->ctx[ctx_id])->ncontig = 0; +#ifdef CONFIG_CRYPTO_DEV_SPACC_SECURE_MODE + /* TODO: This driver works in harmony with "normal" kernel + * processes so we release the context all the time + * normally this would be done from a "secure" kernel process + * (trustzone/etc). This hack is so that SPACC.0 + * cores can both use the same context space. 
+ */ + writel(ctx_id, spacc->regmap + SPACC_REG_SECURE_RELEASE); +#endif + } + + spin_unlock_irqrestore(&spacc->ctx_lock, lock_flag); + + return CRYPTO_OK; +} + +/* Job manager: This will reset all job data, pointers, etc */ +void spacc_job_init_all(struct spacc_device *spacc) +{ + int x; + struct spacc_job *job; + + for (x = 0; x < (SPACC_MAX_JOBS); x++) { + job = &spacc->job[x]; + memset(job, 0, sizeof(struct spacc_job)); + + job->job_swid = SPACC_JOB_IDX_UNUSED; + job->job_used = SPACC_JOB_IDX_UNUSED; + spacc->job_lookup[x] = SPACC_JOB_IDX_UNUSED; + } +} + +/* get a new job id and use a specific ctx_idx or -1 for a new one */ +int spacc_job_request(struct spacc_device *spacc, int ctx_idx) +{ + int x, ret; + struct spacc_job *job; + unsigned long lock_flag; + + if (!spacc) + return -1; + + spin_lock_irqsave(&spacc->lock, lock_flag); + + /* find the first available job id */ + for (x = 0; x < SPACC_MAX_JOBS; x++) { + job = &spacc->job[x]; + if (job->job_used == SPACC_JOB_IDX_UNUSED) { + job->job_used = x; + break; + } + } + + if (x == SPACC_MAX_JOBS) { + ret = -1; + } else { + /* associate a single context to go with job */ + ret = spacc_ctx_request(spacc, ctx_idx, 1); + if (ret != -1) { + job->ctx_idx = ret; + ret = x; + } + } + + spin_unlock_irqrestore(&spacc->lock, lock_flag); + + return ret; +} + +int spacc_job_release(struct spacc_device *spacc, int job_idx) +{ + int ret; + struct spacc_job *job; + unsigned long lock_flag; + + if (!spacc) + return -EINVAL; + + if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) + return -ENXIO; + + spin_lock_irqsave(&spacc->lock, lock_flag); + + job = &spacc->job[job_idx]; + /* release context that goes with job */ + ret = spacc_ctx_release(spacc, job->ctx_idx); + job->ctx_idx = SPACC_CTX_IDX_UNUSED; + job->job_used = SPACC_JOB_IDX_UNUSED; + /* disable any callback*/ + job->cb = NULL; + + /* NOTE: this leaves ctrl data in memory */ + spin_unlock_irqrestore(&spacc->lock, lock_flag); + + return ret; +} + +int 
spacc_handle_release(struct spacc_device *spacc, int job_idx) +{ + int ret = 0; + struct spacc_job *job; + unsigned long lock_flag; + + if (!spacc) + return -EINVAL; + + if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) + return -ENXIO; + + spin_lock_irqsave(&spacc->lock, lock_flag); + + job = &spacc->job[job_idx]; + job->job_used = SPACC_JOB_IDX_UNUSED; + job->cb = NULL; /* disable any callback*/ + + /* NOTE: this leaves ctrl data in memory */ + spin_unlock_irqrestore(&spacc->lock, lock_flag); + + return ret; +} + +/* Return a context structure for a job idx or null if invalid */ +struct spacc_ctx *context_lookup_by_job(struct spacc_device *spacc, int job_idx) +{ + if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) + return NULL; + + return &spacc->ctx[(&spacc->job[job_idx])->ctx_idx]; +} + +int spacc_process_jb(struct spacc_device *spacc) +{ + int tail, ret; + + /* are there jobs in the buffer? */ + while (spacc->jb_head != spacc->jb_tail) { + tail = spacc->jb_tail; + + if (spacc->job_buffer[tail].active) { + ret = spacc_packet_enqueue_ddt_ex(spacc, 0, + spacc->job_buffer[tail].job_idx, + spacc->job_buffer[tail].src, + spacc->job_buffer[tail].dst, + spacc->job_buffer[tail].proc_sz, + spacc->job_buffer[tail].aad_offset, + spacc->job_buffer[tail].pre_aad_sz, + spacc->job_buffer[tail].post_aad_sz, + spacc->job_buffer[tail].iv_offset, + spacc->job_buffer[tail].prio); + + if (ret != -EBUSY) + spacc->job_buffer[tail].active = 0; + else + return -1; + } + + tail++; + if (tail == SPACC_MAX_JOB_BUFFERS) + tail = 0; + + spacc->jb_tail = tail; + } + + return 0; +} + +/* Write appropriate context data which depends on operation and mode */ +int spacc_write_context(struct spacc_device *spacc, int job_idx, int op, + const unsigned char *key, int ksz, + const unsigned char *iv, int ivsz) +{ + int buflen; + int ret = CRYPTO_OK; + unsigned char buf[300]; + struct spacc_ctx *ctx = NULL; + struct spacc_job *job = NULL; + + if (job_idx < 0 || job_idx > SPACC_MAX_JOBS) + return -ENXIO; + + 
job = &spacc->job[job_idx]; + ctx = context_lookup_by_job(spacc, job_idx); + + if (!job || !ctx) + return -EIO; + + switch (op) { + case SPACC_CRYPTO_OPERATION: + /* get page size and then read so we can do a + * read-modify-write cycle + */ + buflen = MIN(sizeof(buf), + (unsigned int)spacc->config.ciph_page_size); + + pdu_from_dev_s(buf, ctx->ciph_key, buflen >> 2, + spacc->config.spacc_endian); + + switch (job->enc_mode) { + case CRYPTO_MODE_SM4_ECB: + case CRYPTO_MODE_SM4_CBC: + case CRYPTO_MODE_SM4_CFB: + case CRYPTO_MODE_SM4_OFB: + case CRYPTO_MODE_SM4_CTR: + case CRYPTO_MODE_SM4_CCM: + case CRYPTO_MODE_SM4_GCM: + case CRYPTO_MODE_SM4_CS1: + case CRYPTO_MODE_SM4_CS2: + case CRYPTO_MODE_SM4_CS3: + case CRYPTO_MODE_AES_ECB: + case CRYPTO_MODE_AES_CBC: + case CRYPTO_MODE_AES_CS1: + case CRYPTO_MODE_AES_CS2: + case CRYPTO_MODE_AES_CS3: + case CRYPTO_MODE_AES_CFB: + case CRYPTO_MODE_AES_OFB: + case CRYPTO_MODE_AES_CTR: + case CRYPTO_MODE_AES_CCM: + case CRYPTO_MODE_AES_GCM: + write_to_buf(buf, key, 0, ksz, buflen); + if (iv) { + unsigned char one[4] = { 0, 0, 0, 1 }; + unsigned long enc1, enc2; + + enc1 = CRYPTO_MODE_AES_GCM; + enc2 = CRYPTO_MODE_SM4_GCM; + + write_to_buf(buf, iv, 32, ivsz, buflen); + if (ivsz == 12 && + (job->enc_mode == enc1 || + job->enc_mode == enc2)) + write_to_buf(buf, one, 11 * 4, 4, + buflen); + } + break; + case CRYPTO_MODE_SM4_F8: + case CRYPTO_MODE_AES_F8: + if (key) { + write_to_buf(buf, key + ksz, 0, ksz, buflen); + write_to_buf(buf, key, 48, ksz, buflen); + } + write_to_buf(buf, iv, 32, 16, buflen); + break; + case CRYPTO_MODE_SM4_XTS: + case CRYPTO_MODE_AES_XTS: + if (key) { + write_to_buf(buf, key, 0, + ksz >> 1, buflen); + write_to_buf(buf, key + (ksz >> 1), 48, + ksz >> 1, buflen); + /* divide by two since that's + * what we program the hardware + */ + ksz = ksz >> 1; + } + write_to_buf(buf, iv, 32, 16, buflen); + break; + case CRYPTO_MODE_MULTI2_ECB: + case CRYPTO_MODE_MULTI2_CBC: + case CRYPTO_MODE_MULTI2_OFB: + case 
CRYPTO_MODE_MULTI2_CFB: + write_to_buf(buf, key, 0, ksz, buflen); + write_to_buf(buf, iv, 0x28, ivsz, buflen); + if (ivsz <= 8) { + /*default to 128 rounds*/ + unsigned char rounds[4] = { 0, 0, 0, 128}; + + write_to_buf(buf, rounds, 0x30, 4, buflen); + } + break; + case CRYPTO_MODE_3DES_CBC: + case CRYPTO_MODE_3DES_ECB: + case CRYPTO_MODE_DES_CBC: + case CRYPTO_MODE_DES_ECB: + write_to_buf(buf, iv, 0, 8, buflen); + write_to_buf(buf, key, 8, ksz, buflen); + break; + case CRYPTO_MODE_KASUMI_ECB: + case CRYPTO_MODE_KASUMI_F8: + write_to_buf(buf, iv, 16, 8, buflen); + write_to_buf(buf, key, 0, 16, buflen); + break; + case CRYPTO_MODE_SNOW3G_UEA2: + case CRYPTO_MODE_ZUC_UEA3: + write_to_buf(buf, key, 0, 32, buflen); + break; + case CRYPTO_MODE_CHACHA20_STREAM: + case CRYPTO_MODE_CHACHA20_POLY1305: + write_to_buf(buf, key, 0, ksz, buflen); + write_to_buf(buf, iv, 32, ivsz, buflen); + break; + case CRYPTO_MODE_NULL: + break; + } + + if (key) { + job->ckey_sz = SPACC_SET_CIPHER_KEY_SZ(ksz); + job->first_use = 1; + } + pdu_to_dev_s(ctx->ciph_key, buf, buflen >> 2, + spacc->config.spacc_endian); + break; + + case SPACC_HASH_OPERATION: + /* get page size and then read so we can do a + * read-modify-write cycle + */ + buflen = MIN(sizeof(buf), + (u32)spacc->config.hash_page_size); + pdu_from_dev_s(buf, ctx->hash_key, buflen >> 2, + spacc->config.spacc_endian); + + switch (job->hash_mode) { + case CRYPTO_MODE_MAC_XCBC: + case CRYPTO_MODE_MAC_SM4_XCBC: + if (key) { + write_to_buf(buf, key + (ksz - 32), 32, 32, + buflen); + write_to_buf(buf, key, 0, (ksz - 32), + buflen); + job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz - 32); + } + break; + case CRYPTO_MODE_HASH_CRC32: + case CRYPTO_MODE_MAC_SNOW3G_UIA2: + case CRYPTO_MODE_MAC_ZUC_UIA3: + if (key) { + write_to_buf(buf, key, 0, ksz, buflen); + job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz); + } + break; + case CRYPTO_MODE_MAC_POLY1305: + write_to_buf(buf, key, 0, ksz, buflen); + write_to_buf(buf, iv, 32, ivsz, buflen); + break; + case 
CRYPTO_MODE_HASH_CSHAKE128: + case CRYPTO_MODE_HASH_CSHAKE256: + /* use "iv" and "key" to */ + /* pass s-string and n-string */ + write_to_buf(buf, iv, 0, ivsz, buflen); + write_to_buf(buf, key, + spacc->config.string_size, ksz, buflen); + break; + case CRYPTO_MODE_MAC_KMAC128: + case CRYPTO_MODE_MAC_KMAC256: + case CRYPTO_MODE_MAC_KMACXOF128: + case CRYPTO_MODE_MAC_KMACXOF256: + /* use "iv" and "key" to pass s-string & key */ + write_to_buf(buf, iv, 0, ivsz, buflen); + write_to_buf(buf, key, + spacc->config.string_size, ksz, buflen); + job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz); + break; + default: + if (key) { + job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz); + write_to_buf(buf, key, 0, ksz, buflen); + } + } + pdu_to_dev_s(ctx->hash_key, buf, buflen >> 2, + spacc->config.spacc_endian); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +int spacc_read_context(struct spacc_device *spacc, int job_idx, + int op, unsigned char *key, int ksz, + unsigned char *iv, int ivsz) +{ + int buflen; + int ret = CRYPTO_OK; + unsigned char buf[300]; + struct spacc_ctx *ctx = NULL; + struct spacc_job *job = NULL; + + if (job_idx < 0 || job_idx > SPACC_MAX_JOBS) + return -ENXIO; + + job = &spacc->job[job_idx]; + ctx = context_lookup_by_job(spacc, job_idx); + + if (!ctx) + return -EIO; + + switch (op) { + case SPACC_CRYPTO_OPERATION: + buflen = MIN(sizeof(buf), + (u32)spacc->config.ciph_page_size); + pdu_from_dev_s(buf, ctx->ciph_key, buflen >> 2, + spacc->config.spacc_endian); + + switch (job->enc_mode) { + case CRYPTO_MODE_SM4_ECB: + case CRYPTO_MODE_SM4_CBC: + case CRYPTO_MODE_SM4_CFB: + case CRYPTO_MODE_SM4_OFB: + case CRYPTO_MODE_SM4_CTR: + case CRYPTO_MODE_SM4_CCM: + case CRYPTO_MODE_SM4_GCM: + case CRYPTO_MODE_SM4_CS1: + case CRYPTO_MODE_SM4_CS2: + case CRYPTO_MODE_SM4_CS3: + case CRYPTO_MODE_AES_ECB: + case CRYPTO_MODE_AES_CBC: + case CRYPTO_MODE_AES_CS1: + case CRYPTO_MODE_AES_CS2: + case CRYPTO_MODE_AES_CS3: + case CRYPTO_MODE_AES_CFB: + case CRYPTO_MODE_AES_OFB: + 
case CRYPTO_MODE_AES_CTR: + case CRYPTO_MODE_AES_CCM: + case CRYPTO_MODE_AES_GCM: + read_from_buf(key, buf, 0, ksz, buflen); + read_from_buf(iv, buf, 32, 16, buflen); + break; + case CRYPTO_MODE_CHACHA20_STREAM: + read_from_buf(key, buf, 0, ksz, buflen); + read_from_buf(iv, buf, 32, 16, buflen); + break; + case CRYPTO_MODE_SM4_F8: + case CRYPTO_MODE_AES_F8: + if (key) { + read_from_buf(key + ksz, buf, 0, ksz, buflen); + read_from_buf(key, buf, 48, ksz, buflen); + } + read_from_buf(iv, buf, 32, 16, buflen); + break; + case CRYPTO_MODE_SM4_XTS: + case CRYPTO_MODE_AES_XTS: + if (key) { + read_from_buf(key, buf, 0, ksz >> 1, buflen); + read_from_buf(key + (ksz >> 1), buf, + 48, ksz >> 1, buflen); + } + read_from_buf(iv, buf, 32, 16, buflen); + break; + case CRYPTO_MODE_MULTI2_ECB: + case CRYPTO_MODE_MULTI2_CBC: + case CRYPTO_MODE_MULTI2_OFB: + case CRYPTO_MODE_MULTI2_CFB: + read_from_buf(key, buf, 0, ksz, buflen); + /* Number of rounds at the end of the IV */ + read_from_buf(iv, buf, 0x28, ivsz, buflen); + break; + case CRYPTO_MODE_3DES_CBC: + case CRYPTO_MODE_3DES_ECB: + read_from_buf(iv, buf, 0, 8, buflen); + read_from_buf(key, buf, 8, 24, buflen); + break; + case CRYPTO_MODE_DES_CBC: + case CRYPTO_MODE_DES_ECB: + read_from_buf(iv, buf, 0, 8, buflen); + read_from_buf(key, buf, 8, 8, buflen); + break; + case CRYPTO_MODE_KASUMI_ECB: + case CRYPTO_MODE_KASUMI_F8: + read_from_buf(iv, buf, 16, 8, buflen); + read_from_buf(key, buf, 0, 16, buflen); + break; + case CRYPTO_MODE_SNOW3G_UEA2: + case CRYPTO_MODE_ZUC_UEA3: + read_from_buf(key, buf, 0, 32, buflen); + break; + case CRYPTO_MODE_NULL: + break; + } + break; + + case SPACC_HASH_OPERATION: + buflen = MIN(sizeof(buf), + (u32)spacc->config.hash_page_size); + pdu_from_dev_s(buf, ctx->hash_key, buflen >> 2, + spacc->config.spacc_endian); + + switch (job->hash_mode) { + case CRYPTO_MODE_MAC_XCBC: + case CRYPTO_MODE_MAC_SM4_XCBC: + if (key && ksz <= 64) { + read_from_buf(key + (ksz - 32), buf, + 32, 32, buflen); + 
read_from_buf(key, buf, 0, ksz - 32, buflen); + } + break; + case CRYPTO_MODE_HASH_CRC32: + read_from_buf(iv, buf, 0, ivsz, buflen); + break; + case CRYPTO_MODE_MAC_SNOW3G_UIA2: + case CRYPTO_MODE_MAC_ZUC_UIA3: + read_from_buf(key, buf, 0, 32, buflen); + break; + default: + read_from_buf(key, buf, 0, ksz, buflen); + } + break; + default: + ret = -EINVAL; + } + + return ret; +} + +/* Context manager: This will reset all reference counts, pointers, etc */ +void spacc_ctx_init_all(struct spacc_device *spacc) +{ + int x; + struct spacc_ctx *ctx; + unsigned long lock_flag; + + spin_lock_irqsave(&spacc->ctx_lock, lock_flag); + + /* initialize contexts */ + for (x = 0; x < spacc->config.num_ctx; x++) { + ctx = &spacc->ctx[x]; + + /* sets everything including ref_cnt and ncontig to 0 */ + memset(ctx, 0, sizeof(*ctx)); + + ctx->ciph_key = spacc->regmap + SPACC_CTX_CIPH_KEY + + (x * spacc->config.ciph_page_size); + ctx->hash_key = spacc->regmap + SPACC_CTX_HASH_KEY + + (x * spacc->config.hash_page_size); + } + + spin_unlock_irqrestore(&spacc->ctx_lock, lock_flag); +} diff --git a/drivers/crypto/dwc-spacc/spacc_skcipher.c b/drivers/crypto/dwc-spacc/spacc_skcipher.c new file mode 100644 index 000000000000..488c03ff6c36 --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_skcipher.c @@ -0,0 +1,712 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include "spacc_device.h" +#include "spacc_core.h" + +static LIST_HEAD(spacc_cipher_alg_list); +static DEFINE_MUTEX(spacc_cipher_alg_mutex); + +static struct mode_tab possible_ciphers[] = { + /* {keylen, MODE_TAB_CIPH(name, id, iv_len, blk_len)} */ + + /* SM4 */ + { MODE_TAB_CIPH("cbc(sm4)", SM4_CBC, 16, 16), .keylen[0] = 16, + .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 }, + { MODE_TAB_CIPH("ecb(sm4)", SM4_ECB, 0, 16), .keylen[0] = 16, + .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 }, + { MODE_TAB_CIPH("ctr(sm4)", SM4_CTR, 16, 1), 
.keylen[0] = 16, + .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 }, + { MODE_TAB_CIPH("xts(sm4)", SM4_XTS, 16, 16), .keylen[0] = 32, + .chunksize = 16, .walksize = 16, .min_keysize = 32, .max_keysize = 32 }, + { MODE_TAB_CIPH("cts(cbc(sm4))", SM4_CS3, 16, 16), .keylen[0] = 16, + .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 }, + + /* AES */ + { MODE_TAB_CIPH("cbc(aes)", AES_CBC, 16, 16), .keylen = { 16, 24, 32 }, + .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 32 }, + { MODE_TAB_CIPH("ecb(aes)", AES_ECB, 0, 16), .keylen = { 16, 24, 32 }, + .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 32 }, + { MODE_TAB_CIPH("xts(aes)", AES_XTS, 16, 16), .keylen = { 32, 48, 64 }, + .chunksize = 16, .walksize = 16, .min_keysize = 32, .max_keysize = 64 }, + { MODE_TAB_CIPH("cts(cbc(aes))", AES_CS3, 16, 16), + .keylen = { 16, 24, 32 }, .chunksize = 16, .walksize = 16, + .min_keysize = 16, .max_keysize = 32 }, + { MODE_TAB_CIPH("ctr(aes)", AES_CTR, 16, 1), .keylen = { 16, 24, 32 }, + .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 32 }, + + /* CHACHA20 */ + { MODE_TAB_CIPH("chacha20", CHACHA20_STREAM, 16, 1), .keylen[0] = 32, + .chunksize = 64, .walksize = 64, .min_keysize = 32, .max_keysize = 32 }, + + /* DES */ + { MODE_TAB_CIPH("ecb(des)", DES_ECB, 0, 8), .keylen[0] = 8, + .chunksize = 8, .walksize = 8, .min_keysize = 8, .max_keysize = 8}, + { MODE_TAB_CIPH("cbc(des)", DES_CBC, 8, 8), .keylen[0] = 8, + .chunksize = 8, .walksize = 8, .min_keysize = 8, .max_keysize = 8}, + { MODE_TAB_CIPH("ecb(des3_ede)", 3DES_ECB, 0, 8), .keylen[0] = 24, + .chunksize = 8, .walksize = 8, .min_keysize = 24, .max_keysize = 24 }, + { MODE_TAB_CIPH("cbc(des3_ede)", 3DES_CBC, 8, 8), .keylen[0] = 24, + .chunksize = 8, .walksize = 8, .min_keysize = 24, .max_keysize = 24 }, +}; + +static int spacc_skcipher_fallback(unsigned char *name, + struct skcipher_request *req, int enc_dec) +{ + int ret = 0; + 
struct crypto_skcipher *reqtfm = crypto_skcipher_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_skcipher_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); + + tctx->fb.cipher = crypto_alloc_skcipher(name, + CRYPTO_ALG_TYPE_SKCIPHER, + CRYPTO_ALG_NEED_FALLBACK); + + crypto_skcipher_set_reqsize(reqtfm, + sizeof(struct spacc_crypto_reqctx) + + crypto_skcipher_reqsize(tctx->fb.cipher)); + ret = crypto_skcipher_setkey(tctx->fb.cipher, tctx->cipher_key, + tctx->key_len); + if (ret) + return ret; + + skcipher_request_set_tfm(&ctx->fb.cipher_req, tctx->fb.cipher); + skcipher_request_set_crypt(&ctx->fb.cipher_req, req->src, req->dst, + req->cryptlen, req->iv); + + if (enc_dec) + ret = crypto_skcipher_decrypt(&ctx->fb.cipher_req); + else + ret = crypto_skcipher_encrypt(&ctx->fb.cipher_req); + + crypto_free_skcipher(tctx->fb.cipher); + tctx->fb.cipher = NULL; + + kfree(tctx->cipher_key); + tctx->cipher_key = NULL; + + return ret; +} + +static void spacc_cipher_cleanup_dma(struct device *dev, + struct skcipher_request *req) +{ + struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); + struct spacc_crypto_ctx *tctx = ctx->ccb.tctx; + + if (req->dst != req->src) { + if (ctx->src_nents) { + dma_unmap_sg(dev, req->src, ctx->src_nents, + DMA_TO_DEVICE); + pdu_ddt_free(&ctx->src); + } + + if (ctx->dst_nents) { + dma_unmap_sg(dev, req->dst, ctx->dst_nents, + DMA_FROM_DEVICE); + pdu_ddt_free(&ctx->dst); + } + } else { + if (ctx->src_nents) { + dma_unmap_sg(dev, req->src, ctx->src_nents, + DMA_TO_DEVICE); + pdu_ddt_free(&ctx->src); + } + } + + kfree(tctx->cipher_key); + tctx->cipher_key = NULL; +} + +static void spacc_cipher_cb(void *spacc, void *tfm) +{ + int err = -1; + struct cipher_cb_data *cb = tfm; + struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(cb->req); + + u32 status_reg = readl(cb->spacc->regmap + SPACC_REG_STATUS); + u32 status_ret = (status_reg >> 24) & 0x03; + + if (ctx->mode == CRYPTO_MODE_DES_CBC || + ctx->mode == 
CRYPTO_MODE_3DES_CBC) { + spacc_read_context(cb->spacc, cb->tctx->handle, + SPACC_CRYPTO_OPERATION, NULL, 0, + cb->req->iv, 8); + } else if (ctx->mode != CRYPTO_MODE_DES_ECB && + ctx->mode != CRYPTO_MODE_3DES_ECB && + ctx->mode != CRYPTO_MODE_SM4_ECB && + ctx->mode != CRYPTO_MODE_AES_ECB && + ctx->mode != CRYPTO_MODE_SM4_XTS && + ctx->mode != CRYPTO_MODE_KASUMI_ECB) { + if (status_ret == 0x3) { + err = -EINVAL; + goto CALLBACK_ERR; + } + spacc_read_context(cb->spacc, cb->tctx->handle, + SPACC_CRYPTO_OPERATION, NULL, 0, + cb->req->iv, 16); + } + + if (ctx->mode != CRYPTO_MODE_DES_ECB && + ctx->mode != CRYPTO_MODE_DES_CBC && + ctx->mode != CRYPTO_MODE_3DES_ECB && + ctx->mode != CRYPTO_MODE_3DES_CBC) { + if (status_ret == 0x03) { + err = -EINVAL; + goto CALLBACK_ERR; + } + } + + if (ctx->mode == CRYPTO_MODE_SM4_ECB && status_ret == 0x03) { + err = -EINVAL; + goto CALLBACK_ERR; + } + + if (cb->req->dst != cb->req->src) + dma_sync_sg_for_cpu(cb->tctx->dev, cb->req->dst, ctx->dst_nents, + DMA_FROM_DEVICE); + + err = cb->spacc->job[cb->new_handle].job_err; + +CALLBACK_ERR: + spacc_cipher_cleanup_dma(cb->tctx->dev, cb->req); + spacc_close(cb->spacc, cb->new_handle); + skcipher_request_complete(cb->req, err); +} + +static int spacc_cipher_init_dma(struct device *dev, + struct skcipher_request *req) +{ + struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); + int rc; + + if (req->src == req->dst) { + rc = spacc_sg_to_ddt(dev, req->src, req->cryptlen, &ctx->src, + DMA_TO_DEVICE); + if (rc < 0) { + pdu_ddt_free(&ctx->src); + return rc; + } + ctx->src_nents = rc; + } else { + rc = spacc_sg_to_ddt(dev, req->src, req->cryptlen, &ctx->src, + DMA_TO_DEVICE); + if (rc < 0) { + pdu_ddt_free(&ctx->src); + return rc; + } + ctx->src_nents = rc; + + rc = spacc_sg_to_ddt(dev, req->dst, req->cryptlen, &ctx->dst, + DMA_FROM_DEVICE); + if (rc < 0) { + pdu_ddt_free(&ctx->dst); + return rc; + } + ctx->dst_nents = rc; + } + + return 0; +} + +static int spacc_cipher_cra_init(struct 
crypto_tfm *tfm) +{ + struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm); + const struct spacc_alg *salg = spacc_tfm_skcipher(tfm); + + tctx->keylen = 0; + tctx->cipher_key = NULL; + tctx->handle = -1; + tctx->ctx_valid = false; + tctx->dev = get_device(salg->dev[0]); + + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct spacc_crypto_reqctx)); + + return 0; +} + +static void spacc_cipher_cra_exit(struct crypto_tfm *tfm) +{ + struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm); + struct spacc_priv *priv = dev_get_drvdata(tctx->dev); + + + if (tctx->handle >= 0) + spacc_close(&priv->spacc, tctx->handle); + + put_device(tctx->dev); +} + + +static int spacc_check_keylen(const struct spacc_alg *salg, unsigned int keylen) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(salg->mode->keylen); i++) + if (salg->mode->keylen[i] == keylen) + return 0; + + return -EINVAL; +} + +static int spacc_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + int ret = 0, rc = 0, err; + const struct spacc_alg *salg = spacc_tfm_skcipher(&tfm->base); + struct spacc_crypto_ctx *tctx = crypto_skcipher_ctx(tfm); + struct spacc_priv *priv = dev_get_drvdata(tctx->dev); + struct spacc_crypto_reqctx *ctx = crypto_skcipher_ctx(tfm); + + err = spacc_check_keylen(salg, keylen); + if (err) + return err; + + ctx->mode = salg->mode->id; + tctx->key_len = keylen; + tctx->cipher_key = kmalloc(keylen, GFP_KERNEL); + memcpy(tctx->cipher_key, key, keylen); + + if (tctx->handle >= 0) { + spacc_close(&priv->spacc, tctx->handle); + put_device(tctx->dev); + tctx->handle = -1; + tctx->dev = NULL; + } + + priv = NULL; + priv = dev_get_drvdata(salg->dev[0]); + tctx->dev = get_device(salg->dev[0]); + ret = spacc_isenabled(&priv->spacc, salg->mode->id, + keylen); + if (ret) + tctx->handle = spacc_open(&priv->spacc, salg->mode->id, + CRYPTO_MODE_NULL, -1, 0, + spacc_cipher_cb, tfm); + + if (tctx->handle < 0) { + put_device(salg->dev[0]); + dev_dbg(salg->dev[0], 
"failed to open SPAcc context\n"); + return -EINVAL; + } + + /* Weak key Implementation for DES_ECB */ + if (salg->mode->id == CRYPTO_MODE_DES_ECB) { + err = verify_skcipher_des_key(tfm, key); + if (err) + return -EINVAL; + } + + if (salg->mode->id == CRYPTO_MODE_SM4_F8 || + salg->mode->id == CRYPTO_MODE_AES_F8) { + /* f8 mode requires an IV of 128-bits and a key-salt mask, + * equivalent in size to the key. + * AES-F8 or SM4-F8 mode has a SALTKEY prepended to the base + * key. + */ + rc = spacc_write_context(&priv->spacc, tctx->handle, + SPACC_CRYPTO_OPERATION, key, 16, + NULL, 0); + } else { + rc = spacc_write_context(&priv->spacc, tctx->handle, + SPACC_CRYPTO_OPERATION, key, keylen, + NULL, 0); + } + + if (rc < 0) { + dev_dbg(salg->dev[0], "failed with SPAcc write context\n"); + return -EINVAL; + } + + return 0; +} + +static int spacc_cipher_process(struct skcipher_request *req, int enc_dec) +{ + u8 ivc1[16]; + unsigned char *name; + unsigned int len = 0; + u32 num_iv = 0, diff; + u64 num_iv64 = 0, diff64; + unsigned char chacha20_iv[16]; + int rc = 0, ret = 0, i = 0, j = 0; + struct crypto_skcipher *reqtfm = crypto_skcipher_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_skcipher_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); + struct spacc_priv *priv = dev_get_drvdata(tctx->dev); + const struct spacc_alg *salg = spacc_tfm_skcipher(&reqtfm->base); + struct spacc_device *device_h = &priv->spacc; + + len = ctx->spacc_cipher_cryptlen / 16; + + if (req->cryptlen == 0) { + if (salg->mode->id == CRYPTO_MODE_SM4_CS3 || + salg->mode->id == CRYPTO_MODE_SM4_XTS || + salg->mode->id == CRYPTO_MODE_AES_XTS || + salg->mode->id == CRYPTO_MODE_AES_CS3) + return -EINVAL; + else + return 0; + } + + /* Given IV - <1st 4-bytes as counter value> + * + * Reversing the order of nonce & counter as, + * <1st 12-bytes as nonce> + * + * and then write to HW context, + * ex: + * Given IV - 2a000000000000000000000000000002 + * Reverse order - 
0000000000000000000000020000002a + */ + if (salg->mode->id == CRYPTO_MODE_CHACHA20_STREAM) { + for (i = 4; i < 16; i++) { + chacha20_iv[j] = req->iv[i]; + j++; + } + + j = j + 3; + + for (i = 0; i <= 3; i++) { + chacha20_iv[j] = req->iv[i]; + j--; + } + memcpy(req->iv, chacha20_iv, 16); + } + + if (salg->mode->id == CRYPTO_MODE_SM4_CFB) { + if (req->cryptlen % 16 != 0) { + name = salg->calg->cra_name; + ret = spacc_skcipher_fallback(name, req, enc_dec); + return ret; + } + } + + if (salg->mode->id == CRYPTO_MODE_SM4_XTS || + salg->mode->id == CRYPTO_MODE_SM4_CS3 || + salg->mode->id == CRYPTO_MODE_AES_XTS || + salg->mode->id == CRYPTO_MODE_AES_CS3) { + if (req->cryptlen == 16) { + name = salg->calg->cra_name; + ret = spacc_skcipher_fallback(name, req, enc_dec); + return ret; + } + } + + if (salg->mode->id == CRYPTO_MODE_AES_CTR || + salg->mode->id == CRYPTO_MODE_SM4_CTR) { + /* copy the IV to local buffer */ + for (i = 0; i < 16; i++) + ivc1[i] = req->iv[i]; + + /* 32-bit counter width */ + if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) & (0x2)) { + + for (i = 12; i < 16; i++) { + num_iv <<= 8; + num_iv |= ivc1[i]; + } + + diff = SPACC_CTR_IV_MAX32 - num_iv; + + if (len > diff) { + name = salg->calg->cra_name; + ret = spacc_skcipher_fallback(name, + req, enc_dec); + return ret; + } + } else if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) + & (0x3)) { /* 64-bit counter width */ + + for (i = 8; i < 16; i++) { + num_iv64 <<= 8; + num_iv64 |= ivc1[i]; + } + + diff64 = SPACC_CTR_IV_MAX64 - num_iv64; + + if (len > diff64) { + name = salg->calg->cra_name; + ret = spacc_skcipher_fallback(name, + req, enc_dec); + return ret; + } + } else if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) + & (0x1)) { /* 16-bit counter width */ + + for (i = 14; i < 16; i++) { + num_iv <<= 8; + num_iv |= ivc1[i]; + } + + diff = SPACC_CTR_IV_MAX16 - num_iv; + + if (len > diff) { + name = salg->calg->cra_name; + ret = spacc_skcipher_fallback(name, + req, enc_dec); + return ret; + 
} + } else if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) + & (0x0)) { /* 8-bit counter width */ + + for (i = 15; i < 16; i++) { + num_iv <<= 8; + num_iv |= ivc1[i]; + } + + diff = SPACC_CTR_IV_MAX8 - num_iv; + + if (len > diff) { + name = salg->calg->cra_name; + ret = spacc_skcipher_fallback(name, + req, enc_dec); + return ret; + } + } + } + + if (salg->mode->id == CRYPTO_MODE_DES_CBC || + salg->mode->id == CRYPTO_MODE_3DES_CBC) + rc = spacc_write_context(&priv->spacc, tctx->handle, + SPACC_CRYPTO_OPERATION, NULL, 0, + req->iv, 8); + else if (salg->mode->id != CRYPTO_MODE_DES_ECB && + salg->mode->id != CRYPTO_MODE_3DES_ECB && + salg->mode->id != CRYPTO_MODE_SM4_ECB && + salg->mode->id != CRYPTO_MODE_AES_ECB && + salg->mode->id != CRYPTO_MODE_KASUMI_ECB) + rc = spacc_write_context(&priv->spacc, tctx->handle, + SPACC_CRYPTO_OPERATION, NULL, 0, + req->iv, 16); + + if (rc < 0) + pr_err("ERR: spacc_write_context\n"); + + /* Initialize the DMA */ + rc = spacc_cipher_init_dma(tctx->dev, req); + + ctx->ccb.new_handle = spacc_clone_handle(&priv->spacc, tctx->handle, + &ctx->ccb); + if (ctx->ccb.new_handle < 0) { + spacc_cipher_cleanup_dma(tctx->dev, req); + dev_dbg(salg->dev[0], "failed to clone handle\n"); + return -EINVAL; + } + + /* copying the data to clone handle */ + ctx->ccb.tctx = tctx; + ctx->ccb.ctx = ctx; + ctx->ccb.req = req; + ctx->ccb.spacc = &priv->spacc; + + if (salg->mode->id == CRYPTO_MODE_SM4_CS3) { + int handle = ctx->ccb.new_handle; + + if (handle < 0 || handle > SPACC_MAX_JOBS) + return -ENXIO; + + device_h->job[handle].auxinfo_cs_mode = 3; + } + + if (enc_dec) { /* for decrypt */ + rc = spacc_set_operation(&priv->spacc, ctx->ccb.new_handle, 1, + ICV_IGNORE, IP_ICV_IGNORE, 0, 0, 0); + spacc_set_key_exp(&priv->spacc, ctx->ccb.new_handle); + } else { /* for encrypt */ + rc = spacc_set_operation(&priv->spacc, ctx->ccb.new_handle, 0, + ICV_IGNORE, IP_ICV_IGNORE, 0, 0, 0); + } + + rc = spacc_packet_enqueue_ddt(&priv->spacc, ctx->ccb.new_handle, + 
&ctx->src, + (req->dst == req->src) ? &ctx->src : + &ctx->dst, + req->cryptlen, + 0, 0, 0, 0, 0); + if (rc < 0) { + spacc_cipher_cleanup_dma(tctx->dev, req); + spacc_close(&priv->spacc, ctx->ccb.new_handle); + + if (rc != -EBUSY && rc < 0) { + dev_err(tctx->dev, + "failed to enqueue job, ERR: %d\n", rc); + return rc; + } else if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + return -EBUSY; + } + } + + priv->spacc.job[tctx->handle].first_use = 0; + priv->spacc.job[tctx->handle].ctrl &= + ~(1UL << priv->spacc.config.ctrl_map[SPACC_CTRL_KEY_EXP]); + + return -EINPROGRESS; +} + +static int spacc_cipher_encrypt(struct skcipher_request *req) +{ + int rv = 0; + struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); + + ctx->spacc_cipher_cryptlen = req->cryptlen; + + /* enc_dec - 0(encrypt), 1(decrypt) */ + rv = spacc_cipher_process(req, 0); + + return rv; +} + +static int spacc_cipher_decrypt(struct skcipher_request *req) +{ + int rv = 0; + struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); + + ctx->spacc_cipher_cryptlen = req->cryptlen; + + /* enc_dec - 0(encrypt), 1(decrypt) */ + rv = spacc_cipher_process(req, 1); + + return rv; +} + +static struct skcipher_alg spacc_skcipher_alg = { + .setkey = spacc_cipher_setkey, + .encrypt = spacc_cipher_encrypt, + .decrypt = spacc_cipher_decrypt, + /* + * Chunksize: Equal to the block size except for stream cipher + * such as CTR where it is set to the underlying block size. + * + * Walksize: Equal to the chunk size except in cases where the + * algorithm is considerably more efficient if it can operate on + * multiple chunks in parallel. Should be a multiple of chunksize. 
+ */ + .min_keysize = 16, + .max_keysize = 64, + .ivsize = 16, + .chunksize = 16, + .walksize = 16, + .base = { + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 16, + .cra_ctxsize = sizeof(struct spacc_crypto_ctx), + .cra_priority = 300, + .cra_init = spacc_cipher_cra_init, + .cra_exit = spacc_cipher_cra_exit, + .cra_module = THIS_MODULE, + }, +}; + +static void spacc_init_calg(struct crypto_alg *calg, + const struct mode_tab *mode) +{ + + strscpy(calg->cra_name, mode->name, sizeof(mode->name) - 1); + calg->cra_name[sizeof(mode->name) - 1] = '\0'; + + strscpy(calg->cra_driver_name, "spacc-"); + strcat(calg->cra_driver_name, mode->name); + calg->cra_driver_name[sizeof(calg->cra_driver_name) - 1] = '\0'; + calg->cra_blocksize = mode->blocklen; +} + +static int spacc_register_cipher(struct spacc_alg *salg, + unsigned int algo_idx) +{ + int rc; + + salg->calg = &salg->alg.skcipher.base; + salg->alg.skcipher = spacc_skcipher_alg; + + /* this function will assign mode->name to calg->cra_name & + * calg->cra_driver_name + */ + spacc_init_calg(salg->calg, salg->mode); + salg->alg.skcipher.ivsize = salg->mode->ivlen; + salg->alg.skcipher.base.cra_blocksize = salg->mode->blocklen; + + salg->alg.skcipher.chunksize = possible_ciphers[algo_idx].chunksize; + salg->alg.skcipher.walksize = possible_ciphers[algo_idx].walksize; + salg->alg.skcipher.min_keysize = possible_ciphers[algo_idx].min_keysize; + salg->alg.skcipher.max_keysize = possible_ciphers[algo_idx].max_keysize; + + rc = crypto_register_skcipher(&salg->alg.skcipher); + if (rc < 0) + return rc; + + mutex_lock(&spacc_cipher_alg_mutex); + list_add(&salg->list, &spacc_cipher_alg_list); + mutex_unlock(&spacc_cipher_alg_mutex); + + return 0; +} + +int probe_ciphers(struct platform_device *spacc_pdev) +{ + int rc; + unsigned int i, y; + int registered = 0; + struct spacc_alg *salg; + struct spacc_priv *priv = 
dev_get_drvdata(&spacc_pdev->dev); + + for (i = 0; i < ARRAY_SIZE(possible_ciphers); i++) + possible_ciphers[i].valid = 0; + + for (i = 0; i < ARRAY_SIZE(possible_ciphers) && + (possible_ciphers[i].valid == 0); i++) { + for (y = 0; y < 3; y++) { + if (spacc_isenabled(&priv->spacc, + possible_ciphers[i].id & 0xFF, + possible_ciphers[i].keylen[y])) { + salg = kmalloc(sizeof(*salg), GFP_KERNEL); + if (!salg) + return -ENOMEM; + + salg->mode = &possible_ciphers[i]; + salg->dev[0] = &spacc_pdev->dev; + + if (possible_ciphers[i].valid == 0) { + rc = spacc_register_cipher(salg, i); + if (rc < 0) { + kfree(salg); + continue; + } + } + dev_dbg(&spacc_pdev->dev, "registered %s\n", + possible_ciphers[i].name); + registered++; + possible_ciphers[i].valid = 1; + } + } + } + + return registered; +} + +int spacc_unregister_cipher_algs(void) +{ + struct spacc_alg *salg, *tmp; + + mutex_lock(&spacc_cipher_alg_mutex); + + list_for_each_entry_safe(salg, tmp, &spacc_cipher_alg_list, list) { + crypto_unregister_skcipher(&salg->alg.skcipher); + list_del(&salg->list); + kfree(salg); + } + + mutex_unlock(&spacc_cipher_alg_mutex); + + return 0; +} From 8ebb14deef0f374f7ca0d34a1ad720ba0a7b79f3 Mon Sep 17 00:00:00 2001 From: Pavitrakumar M Date: Mon, 29 Jul 2024 09:43:46 +0530 Subject: [PATCH 12/96] crypto: spacc - Enable SPAcc AUTODETECT Signed-off-by: Bhoomika K Signed-off-by: Pavitrakumar M Acked-by: Ruud Derwig Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_core.c | 1385 ++++++++++++++++++++++++- 1 file changed, 1384 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/dwc-spacc/spacc_core.c b/drivers/crypto/dwc-spacc/spacc_core.c index 2bad071efd9b..9bc49de06bb2 100644 --- a/drivers/crypto/dwc-spacc/spacc_core.c +++ b/drivers/crypto/dwc-spacc/spacc_core.c @@ -107,6 +107,881 @@ static const unsigned char template[] = { [CRYPTO_MODE_MAC_SM4_CMAC] = 242, }; +#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AUTODETECT) +static const struct { + unsigned int min_version; + struct { + 
int outlen; + unsigned char data[64]; + } test[7]; +} testdata[CRYPTO_MODE_LAST] = { + /* NULL*/ + { .min_version = 0x65, + .test[0].outlen = 0 + }, + + /* AES_ECB*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0xc6, 0xa1, 0x3b, 0x37, + 0x87, 0x8f, 0x5b, 0x82, 0x6f, 0x4f, 0x81, 0x62, 0xa1, + 0xc8, 0xd8, 0x79, }, + .test[3].outlen = 16, .test[3].data = { 0x91, 0x62, 0x51, 0x82, + 0x1c, 0x73, 0xa5, 0x22, 0xc3, 0x96, 0xd6, 0x27, 0x38, + 0x01, 0x96, 0x07, }, + .test[4].outlen = 16, .test[4].data = { 0xf2, 0x90, 0x00, 0xb6, + 0x2a, 0x49, 0x9f, 0xd0, 0xa9, 0xf3, 0x9a, 0x6a, 0xdd, + 0x2e, 0x77, 0x80, }, + }, + + /* AES_CBC*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x0a, 0x94, 0x0b, 0xb5, + 0x41, 0x6e, 0xf0, 0x45, 0xf1, 0xc3, 0x94, 0x58, 0xc6, + 0x53, 0xea, 0x5a, }, + .test[3].outlen = 16, .test[3].data = { 0x00, 0x60, 0xbf, 0xfe, + 0x46, 0x83, 0x4b, 0xb8, 0xda, 0x5c, 0xf9, 0xa6, 0x1f, + 0xf2, 0x20, 0xae, }, + .test[4].outlen = 16, .test[4].data = { 0x5a, 0x6e, 0x04, 0x57, + 0x08, 0xfb, 0x71, 0x96, 0xf0, 0x2e, 0x55, 0x3d, 0x02, + 0xc3, 0xa6, 0x92, }, + }, + + /* AES_CTR*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x0a, 0x94, 0x0b, 0xb5, + 0x41, 0x6e, 0xf0, 0x45, 0xf1, 0xc3, 0x94, 0x58, 0xc6, + 0x53, 0xea, 0x5a, }, + .test[3].outlen = 16, .test[3].data = { 0x00, 0x60, 0xbf, 0xfe, + 0x46, 0x83, 0x4b, 0xb8, 0xda, 0x5c, 0xf9, 0xa6, 0x1f, + 0xf2, 0x20, 0xae, }, + .test[4].outlen = 16, .test[4].data = { 0x5a, 0x6e, 0x04, 0x57, + 0x08, 0xfb, 0x71, 0x96, 0xf0, 0x2e, 0x55, 0x3d, 0x02, + 0xc3, 0xa6, 0x92, }, + }, + + /* AES_CCM*/ + { .min_version = 0x65, + .test[2].outlen = 32, .test[2].data = { 0x02, 0x63, 0xec, 0x94, + 0x66, 0x18, 0x72, 0x96, 0x9a, 0xda, 0xfd, 0x0f, 0x4b, + 0xa4, 0x0f, 0xdc, 0xa5, 0x09, 0x92, 0x93, 0xb6, 0xb4, + 0x38, 0x34, 0x63, 0x72, 0x50, 0x4c, 0xfc, 0x8a, 0x63, + 0x02, }, + .test[3].outlen = 32, .test[3].data = { 0x29, 0xf7, 0x63, 0xe8, + 0xa1, 0x75, 0xc6, 0xbf, 0xa5, 0x54, 0x94, 0x89, 
0x12, + 0x84, 0x45, 0xf5, 0x9b, 0x27, 0xeb, 0xb1, 0xa4, 0x65, + 0x93, 0x6e, 0x5a, 0xc0, 0xa2, 0xa3, 0xe2, 0x6c, 0x46, + 0x29, }, + .test[4].outlen = 32, .test[4].data = { 0x60, 0xf3, 0x10, 0xd5, + 0xc3, 0x85, 0x58, 0x5d, 0x55, 0x16, 0xfb, 0x51, 0x72, + 0xe5, 0x20, 0xcf, 0x8e, 0x87, 0x6d, 0x72, 0xc8, 0x44, + 0xbe, 0x6d, 0xa2, 0xd6, 0xf4, 0xba, 0xec, 0xb4, 0xec, + 0x39, }, + }, + + /* AES_GCM*/ + { .min_version = 0x65, + .test[2].outlen = 32, .test[2].data = { 0x93, 0x6c, 0xa7, 0xce, + 0x66, 0x1b, 0xf7, 0x54, 0x4b, 0xd2, 0x61, 0x8a, 0x36, + 0xa3, 0x70, 0x08, 0xc0, 0xd7, 0xd0, 0x77, 0xc5, 0x64, + 0x76, 0xdb, 0x48, 0x4a, 0x53, 0xe3, 0x6c, 0x93, 0x34, + 0x0f, }, + .test[3].outlen = 32, .test[3].data = { 0xe6, 0xf9, 0x22, 0x9b, + 0x99, 0xb9, 0xc9, 0x0e, 0xd0, 0x33, 0xdc, 0x82, 0xff, + 0xa9, 0xdc, 0x70, 0x4c, 0xcd, 0xc4, 0x1b, 0xa3, 0x5a, + 0x87, 0x5d, 0xd8, 0xef, 0xb6, 0x48, 0xbb, 0x0c, 0x92, + 0x60, }, + .test[4].outlen = 32, .test[4].data = { 0x47, 0x02, 0xd6, 0x1b, + 0xc5, 0xe5, 0xc2, 0x1b, 0x8d, 0x41, 0x97, 0x8b, 0xb1, + 0xe9, 0x78, 0x6d, 0x48, 0x6f, 0x78, 0x81, 0xc7, 0x98, + 0xcc, 0xf5, 0x28, 0xf1, 0x01, 0x7c, 0xe8, 0xf6, 0x09, + 0x78, }, + }, + + /* AES-F8*/ + { .min_version = 0x65, + .test[0].outlen = 0 + }, + + /* AES-XTS*/ + { .min_version = 0x65, + .test[2].outlen = 32, .test[2].data = { 0xa0, 0x1a, 0x6f, 0x09, + 0xfa, 0xef, 0xd2, 0x72, 0xc3, 0x9b, 0xad, 0x35, 0x52, + 0xfc, 0xa1, 0xcb, 0x33, 0x69, 0x51, 0xc5, 0x23, 0xbe, + 0xac, 0xa5, 0x4a, 0xf2, 0xfc, 0x77, 0x71, 0x6f, 0x9a, + 0x86, }, + .test[4].outlen = 32, .test[4].data = { 0x05, 0x45, 0x91, 0x86, + 0xf2, 0x2d, 0x97, 0x93, 0xf3, 0xa0, 0xbb, 0x29, 0xc7, + 0x9c, 0xc1, 0x4c, 0x3b, 0x8f, 0xdd, 0x9d, 0xda, 0xc7, + 0xb5, 0xaa, 0xc2, 0x7c, 0x2e, 0x71, 0xce, 0x7f, 0xce, + 0x0e, }, + }, + + /* AES-CFB*/ + { .min_version = 0x65, + .test[0].outlen = 0 + }, + + /* AES-OFB*/ + { .min_version = 0x65, + .test[0].outlen = 0 + }, + + /* AES-CS1*/ + { .min_version = 0x65, + .test[2].outlen = 31, .test[2].data = { 0x0a, 0x94, 
0x0b, 0xb5, + 0x41, 0x6e, 0xf0, 0x45, 0xf1, 0xc3, 0x94, 0x58, 0xc6, + 0x53, 0xea, 0xae, 0xe7, 0x1e, 0xa5, 0x41, 0xd7, 0xae, + 0x4b, 0xeb, 0x60, 0xbe, 0xcc, 0x59, 0x3f, 0xb6, 0x63, + }, + .test[3].outlen = 31, .test[3].data = { 0x00, 0x60, 0xbf, 0xfe, + 0x46, 0x83, 0x4b, 0xb8, 0xda, 0x5c, 0xf9, 0xa6, 0x1f, + 0xf2, 0x20, 0x2e, 0x84, 0xcb, 0x12, 0xa3, 0x59, 0x17, + 0xb0, 0x9e, 0x25, 0xa2, 0xa2, 0x3d, 0xf1, 0x9f, 0xdc, + }, + .test[4].outlen = 31, .test[4].data = { 0x5a, 0x6e, 0x04, 0x57, + 0x08, 0xfb, 0x71, 0x96, 0xf0, 0x2e, 0x55, 0x3d, 0x02, + 0xc3, 0xa6, 0xcd, 0xfc, 0x25, 0x35, 0x31, 0x0b, 0xf5, + 0x6b, 0x2e, 0xb7, 0x8a, 0xa2, 0x5a, 0xdd, 0x77, 0x51, + }, + }, + + /* AES-CS2*/ + { .min_version = 0x65, + .test[2].outlen = 31, .test[2].data = { 0xae, 0xe7, 0x1e, 0xa5, + 0x41, 0xd7, 0xae, 0x4b, 0xeb, 0x60, 0xbe, 0xcc, 0x59, + 0x3f, 0xb6, 0x63, 0x0a, 0x94, 0x0b, 0xb5, 0x41, 0x6e, + 0xf0, 0x45, 0xf1, 0xc3, 0x94, 0x58, 0xc6, 0x53, 0xea, + }, + .test[3].outlen = 31, .test[3].data = { 0x2e, 0x84, 0xcb, 0x12, + 0xa3, 0x59, 0x17, 0xb0, 0x9e, 0x25, 0xa2, 0xa2, 0x3d, + 0xf1, 0x9f, 0xdc, 0x00, 0x60, 0xbf, 0xfe, 0x46, 0x83, + 0x4b, 0xb8, 0xda, 0x5c, 0xf9, 0xa6, 0x1f, 0xf2, 0x20, + }, + .test[4].outlen = 31, .test[4].data = { 0xcd, 0xfc, 0x25, 0x35, + 0x31, 0x0b, 0xf5, 0x6b, 0x2e, 0xb7, 0x8a, 0xa2, 0x5a, + 0xdd, 0x77, 0x51, 0x5a, 0x6e, 0x04, 0x57, 0x08, 0xfb, + 0x71, 0x96, 0xf0, 0x2e, 0x55, 0x3d, 0x02, 0xc3, 0xa6, + }, + }, + + /* AES-CS3*/ + { .min_version = 0x65, + .test[2].outlen = 31, .test[2].data = { 0xae, 0xe7, 0x1e, 0xa5, + 0x41, 0xd7, 0xae, 0x4b, 0xeb, 0x60, 0xbe, 0xcc, 0x59, + 0x3f, 0xb6, 0x63, 0x0a, 0x94, 0x0b, 0xb5, 0x41, 0x6e, + 0xf0, 0x45, 0xf1, 0xc3, 0x94, 0x58, 0xc6, 0x53, 0xea, + }, + .test[3].outlen = 31, .test[3].data = { 0x2e, 0x84, 0xcb, 0x12, + 0xa3, 0x59, 0x17, 0xb0, 0x9e, 0x25, 0xa2, 0xa2, 0x3d, + 0xf1, 0x9f, 0xdc, 0x00, 0x60, 0xbf, 0xfe, 0x46, 0x83, + 0x4b, 0xb8, 0xda, 0x5c, 0xf9, 0xa6, 0x1f, 0xf2, 0x20, + }, + .test[4].outlen = 31, .test[4].data = { 0xcd, 
0xfc, 0x25, 0x35, + 0x31, 0x0b, 0xf5, 0x6b, 0x2e, 0xb7, 0x8a, 0xa2, 0x5a, + 0xdd, 0x77, 0x51, 0x5a, 0x6e, 0x04, 0x57, 0x08, 0xfb, + 0x71, 0x96, 0xf0, 0x2e, 0x55, 0x3d, 0x02, 0xc3, 0xa6, + }, + }, + + /* MULTI2*/ + { .min_version = 0x65, + .test[0].outlen = 0 + }, + { .min_version = 0x65, + .test[0].outlen = 0 + }, + { .min_version = 0x65, + .test[0].outlen = 0 + }, + { .min_version = 0x65, + .test[0].outlen = 0 + }, + + /* 3DES_CBC*/ + { .min_version = 0x65, + .test[3].outlen = 16, .test[3].data = { 0x58, 0xed, 0x24, 0x8f, + 0x77, 0xf6, 0xb1, 0x9e, 0x47, 0xd9, 0xb7, 0x4a, 0x4f, + 0x5a, 0xe6, 0x6d, } + }, + + /* 3DES_ECB*/ + { .min_version = 0x65, + .test[3].outlen = 16, .test[3].data = { 0x89, 0x4b, 0xc3, 0x08, + 0x54, 0x26, 0xa4, 0x41, 0x89, 0x4b, 0xc3, 0x08, 0x54, + 0x26, 0xa4, 0x41, } + }, + + /* DES_CBC*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0xe1, 0xb2, 0x46, 0xe5, + 0xa7, 0xc7, 0x4c, 0xbc, 0xd5, 0xf0, 0x8e, 0x25, 0x3b, + 0xfa, 0x23, 0x80, } + }, + + /* DES_ECB*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0xa5, 0x17, 0x3a, + 0xd5, 0x95, 0x7b, 0x43, 0x70, 0xa5, 0x17, 0x3a, 0xd5, + 0x95, 0x7b, 0x43, 0x70, } + }, + + /* KASUMI_ECB*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x04, 0x7d, 0x5d, + 0x2c, 0x8c, 0x2e, 0x91, 0xb3, 0x04, 0x7d, 0x5d, 0x2c, + 0x8c, 0x2e, 0x91, 0xb3, } }, + + /* KASUMI_F8*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0xfc, 0xf7, 0x45, + 0xee, 0x1d, 0xbb, 0xa4, 0x57, 0xa7, 0x45, 0xdc, 0x6b, + 0x2a, 0x1b, 0x50, 0x88, } + }, + + /* SNOW3G UEA2*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x95, 0xd3, 0xc8, + 0x13, 0xc0, 0x20, 0x24, 0xa3, 0x76, 0x24, 0xd1, 0x98, + 0xb6, 0x67, 0x4d, 0x4c, } + }, + + /* ZUC UEA3*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0xda, 0xdf, 0xb6, + 0xa2, 0xac, 0x9d, 0xba, 0xfe, 0x18, 0x9c, 0x0c, 0x75, + 0x79, 0xc6, 0xe0, 0x4e, } + }, + + /* CHACHA20_STREAM*/ + { .min_version 
= 0x65, + .test[4].outlen = 16, .test[4].data = { 0x55, 0xdf, 0x91, + 0xe9, 0x27, 0x01, 0x37, 0x69, 0xdb, 0x38, 0xd4, 0x28, + 0x01, 0x79, 0x76, 0x64 } + }, + + /* CHACHA20_POLY1305 (AEAD)*/ + { .min_version = 0x65, + .test[4].outlen = 16, .test[4].data = { 0x89, 0xfb, 0x08, + 0x00, 0x29, 0x17, 0xa5, 0x40, 0xb7, 0x83, 0x3f, 0xf3, + 0x98, 0x1d, 0x0e, 0x63 } + }, + + /* SM4_ECB 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x1e, 0x96, 0x34, + 0xb7, 0x70, 0xf9, 0xae, 0xba, 0xa9, 0x34, 0x4f, 0x5a, + 0xff, 0x9f, 0x82, 0xa3 } + }, + + /* SM4_CBC 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x8f, 0x78, 0x76, + 0x3e, 0xe0, 0x60, 0x13, 0xe0, 0xb7, 0x62, 0x2c, 0x42, + 0x8f, 0xd0, 0x52, 0x8d } + }, + + /* SM4_CFB 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x8f, 0x78, 0x76, + 0x3e, 0xe0, 0x60, 0x13, 0xe0, 0xb7, 0x62, 0x2c, 0x42, + 0x8f, 0xd0, 0x52, 0x8d } + }, + + /* SM4_OFB 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x8f, 0x78, 0x76, 0x3e, 0xe0, + 0x60, 0x13, 0xe0, 0xb7, 0x62, 0x2c, 0x42, 0x8f, 0xd0, 0x52, + 0x8d } + }, + + /* SM4_CTR 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x8f, 0x78, 0x76, 0x3e, 0xe0, + 0x60, 0x13, 0xe0, 0xb7, 0x62, 0x2c, 0x42, 0x8f, 0xd0, 0x52, + 0x8d } + }, + + /* SM4_CCM 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x8e, 0x25, 0x5a, + 0x13, 0xc7, 0x43, 0x4d, 0x95, 0xef, 0x14, 0x15, 0x11, + 0xd0, 0xb9, 0x60, 0x5b } + }, + + /* SM4_GCM 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x97, 0x46, 0xde, + 0xfb, 0xc9, 0x6a, 0x85, 0x00, 0xff, 0x9c, 0x74, 0x4d, + 0xd1, 0xbb, 0xf9, 0x66 } + }, + + /* SM4_F8 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x77, 0x30, 0xff, + 0x70, 0x46, 0xbc, 0xf4, 0xe3, 0x11, 0xf6, 0x27, 0xe2, + 0xff, 0xd7, 0xc4, 0x2e } + }, + + /* SM4_XTS 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = 
{ 0x05, 0x3f, 0xb6, + 0xe9, 0xb1, 0xff, 0x09, 0x4f, 0x9d, 0x69, 0x4d, 0xc2, + 0xb6, 0xa1, 0x15, 0xde } + }, + + /* SM4_CS1 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0x8f, 0x78, 0x76, + 0x3e, 0xe0, 0x60, 0x13, 0xe0, 0xb7, 0x62, 0x2c, 0x42, + 0x8f, 0xd0, 0x52, 0xa0 } + }, + + /* SM4_CS2 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0xa0, 0x1c, 0xfe, + 0x91, 0xaa, 0x7e, 0xf1, 0x75, 0x6a, 0xe8, 0xbc, 0xe1, + 0x55, 0x08, 0xda, 0x71 } + }, + + /* SM4_CS3 128*/ + { .min_version = 0x65, + .test[2].outlen = 16, .test[2].data = { 0xa0, 0x1c, 0xfe, + 0x91, 0xaa, 0x7e, 0xf1, 0x75, 0x6a, 0xe8, 0xbc, 0xe1, + 0x55, 0x08, 0xda, 0x71 } + }, + + /* hashes ... note they use the 2nd keysize + * array so the indecies mean different sizes!!! + */ + + /* MD5 HASH/HMAC*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0x70, 0xbc, 0x8f, 0x4b, + 0x72, 0xa8, 0x69, 0x21, 0x46, 0x8b, 0xf8, 0xe8, 0x44, + 0x1d, 0xce, 0x51, } + }, + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0xb6, 0x39, 0xc8, 0x73, + 0x16, 0x38, 0x61, 0x8b, 0x70, 0x79, 0x72, 0xaa, 0x6e, + 0x96, 0xcf, 0x90, }, + .test[4].outlen = 16, .test[4].data = { 0xb7, 0x79, 0x68, 0xea, + 0x17, 0x32, 0x1e, 0x32, 0x13, 0x90, 0x6c, 0x2e, 0x9f, + 0xd5, 0xc8, 0xb3, }, + .test[5].outlen = 16, .test[5].data = { 0x80, 0x3e, 0x0a, 0x2f, + 0x8a, 0xd8, 0x31, 0x8f, 0x8e, 0x12, 0x28, 0x86, 0x22, + 0x59, 0x6b, 0x05, }, + }, + /* SHA1*/ + { .min_version = 0x65, + .test[1].outlen = 20, .test[1].data = { 0xde, 0x8a, 0x84, 0x7b, + 0xff, 0x8c, 0x34, 0x3d, 0x69, 0xb8, 0x53, 0xa2, 0x15, + 0xe6, 0xee, 0x77, 0x5e, 0xf2, 0xef, 0x96, } + }, + { .min_version = 0x65, + .test[1].outlen = 20, .test[1].data = { 0xf8, 0x54, 0x60, 0x50, + 0x49, 0x56, 0xd1, 0xcd, 0x55, 0x5c, 0x5d, 0xcd, 0x24, + 0x33, 0xbf, 0xdc, 0x5c, 0x99, 0x54, 0xc8, }, + .test[4].outlen = 20, .test[4].data = { 0x66, 0x3f, 0x3a, 0x3c, + 0x08, 0xb6, 0x87, 0xb2, 0xd3, 0x0c, 0x5a, 0xa7, 0xcc, + 0x5c, 0xc3, 
0x99, 0xb2, 0xb4, 0x58, 0x55, }, + .test[5].outlen = 20, .test[5].data = { 0x9a, 0x28, 0x54, 0x2f, + 0xaf, 0xa7, 0x0b, 0x37, 0xbe, 0x2d, 0x3e, 0xd9, 0xd4, + 0x70, 0xbc, 0xdc, 0x0b, 0x54, 0x20, 0x06, }, + }, + /* SHA224_HASH*/ + { .min_version = 0x65, + .test[1].outlen = 28, .test[1].data = { 0xb3, 0x38, 0xc7, 0x6b, + 0xcf, 0xfa, 0x1a, 0x0b, 0x3e, 0xad, 0x8d, 0xe5, 0x8d, + 0xfb, 0xff, 0x47, 0xb6, 0x3a, 0xb1, 0x15, 0x0e, 0x10, + 0xd8, 0xf1, 0x7f, 0x2b, 0xaf, 0xdf, } + }, + { .min_version = 0x65, + .test[1].outlen = 28, .test[1].data = { 0xf3, 0xb4, 0x33, 0x78, + 0x53, 0x4c, 0x0c, 0x4a, 0x1e, 0x31, 0xc2, 0xce, 0xda, + 0xc8, 0xfe, 0x74, 0x4a, 0xd2, 0x9b, 0x7c, 0x1d, 0x2f, + 0x5e, 0xa1, 0xaa, 0x31, 0xb9, 0xf5, }, + .test[4].outlen = 28, .test[4].data = { 0x4b, 0x6b, 0x3f, 0x9a, + 0x66, 0x47, 0x45, 0xe2, 0x60, 0xc9, 0x53, 0x86, 0x7a, + 0x34, 0x65, 0x7d, 0xe2, 0x24, 0x06, 0xcc, 0xf9, 0x17, + 0x20, 0x5d, 0xc2, 0xb6, 0x97, 0x9a, }, + .test[5].outlen = 28, .test[5].data = { 0x90, 0xb0, 0x6e, 0xee, + 0x21, 0x57, 0x38, 0xc7, 0x65, 0xbb, 0x9a, 0xf5, 0xb4, + 0x31, 0x0a, 0x0e, 0xe5, 0x64, 0xc4, 0x49, 0x9d, 0xbd, + 0xe9, 0xf7, 0xac, 0x9f, 0xf8, 0x05, }, + }, + + /* SHA256_HASH*/ + { .min_version = 0x65, + .test[1].outlen = 32, .test[1].data = { 0x66, 0x68, 0x7a, 0xad, + 0xf8, 0x62, 0xbd, 0x77, 0x6c, 0x8f, 0xc1, 0x8b, 0x8e, + 0x9f, 0x8e, 0x20, 0x08, 0x97, 0x14, 0x85, 0x6e, 0xe2, + 0x33, 0xb3, 0x90, 0x2a, 0x59, 0x1d, 0x0d, 0x5f, 0x29, + 0x25, } + }, + { .min_version = 0x65, + .test[1].outlen = 32, .test[1].data = { 0x75, 0x40, 0x84, 0x49, + 0x54, 0x0a, 0xf9, 0x80, 0x99, 0xeb, 0x93, 0x6b, 0xf6, + 0xd3, 0xff, 0x41, 0x05, 0x47, 0xcc, 0x82, 0x62, 0x76, + 0x32, 0xf3, 0x43, 0x74, 0x70, 0x54, 0xe2, 0x3b, 0xc0, + 0x90, }, + .test[4].outlen = 32, .test[4].data = { 0x41, 0x6c, 0x53, 0x92, + 0xb9, 0xf3, 0x6d, 0xf1, 0x88, 0xe9, 0x0e, 0xb1, 0x4d, + 0x17, 0xbf, 0x0d, 0xa1, 0x90, 0xbf, 0xdb, 0x7f, 0x1f, + 0x49, 0x56, 0xe6, 0xe5, 0x66, 0xa5, 0x69, 0xc8, 0xb1, + 0x5c, }, + .test[5].outlen = 32, 
.test[5].data = { 0x49, 0x1f, 0x58, 0x3b, + 0x05, 0xe2, 0x3a, 0x72, 0x1d, 0x11, 0x6d, 0xc1, 0x08, + 0xa0, 0x3f, 0x30, 0x37, 0x98, 0x36, 0x8a, 0x49, 0x4c, + 0x21, 0x1d, 0x56, 0xa5, 0x2a, 0xf3, 0x68, 0x28, 0xb7, + 0x69, }, + }, + /* SHA384_HASH*/ + { .min_version = 0x65, + .test[1].outlen = 48, .test[1].data = { 0xa3, 0x8f, 0xff, 0x4b, + 0xa2, 0x6c, 0x15, 0xe4, 0xac, 0x9c, 0xde, 0x8c, 0x03, + 0x10, 0x3a, 0xc8, 0x90, 0x80, 0xfd, 0x47, 0x54, 0x5f, + 0xde, 0x94, 0x46, 0xc8, 0xf1, 0x92, 0x72, 0x9e, 0xab, + 0x7b, 0xd0, 0x3a, 0x4d, 0x5c, 0x31, 0x87, 0xf7, 0x5f, + 0xe2, 0xa7, 0x1b, 0x0e, 0xe5, 0x0a, 0x4a, 0x40, } + }, + { .min_version = 0x65, + .test[1].outlen = 48, .test[1].data = { 0x6c, 0xd8, 0x89, 0xa0, + 0xca, 0x54, 0xa6, 0x1d, 0x24, 0xc4, 0x1d, 0xa1, 0x77, + 0x50, 0xd6, 0xf2, 0xf3, 0x43, 0x23, 0x0d, 0xb1, 0xf5, + 0xf7, 0xfc, 0xc0, 0x8c, 0xf6, 0xdf, 0x3c, 0x61, 0xfc, + 0x8a, 0xb9, 0xda, 0x12, 0x75, 0x97, 0xac, 0x51, 0x88, + 0x59, 0x19, 0x44, 0x13, 0xc0, 0x78, 0xa5, 0xa8, }, + .test[4].outlen = 48, .test[4].data = { 0x0c, 0x91, 0x36, 0x46, + 0xd9, 0x17, 0x81, 0x46, 0x1d, 0x42, 0xb1, 0x00, 0xaa, + 0xfa, 0x26, 0x92, 0x9f, 0x05, 0xc0, 0x91, 0x8e, 0x20, + 0xd7, 0x75, 0x9d, 0xd2, 0xc8, 0x9b, 0x02, 0x18, 0x20, + 0x1f, 0xdd, 0xa3, 0x32, 0xe3, 0x1e, 0xa4, 0x2b, 0xc3, + 0xc8, 0xb9, 0xb1, 0x53, 0x4e, 0x6a, 0x49, 0xd2, }, + .test[5].outlen = 48, .test[5].data = { 0x84, 0x78, 0xd2, 0xf1, + 0x44, 0x95, 0x6a, 0x22, 0x2d, 0x08, 0x19, 0xe8, 0xea, + 0x61, 0xb4, 0x86, 0xe8, 0xc6, 0xb0, 0x40, 0x51, 0x28, + 0x22, 0x54, 0x48, 0xc0, 0x70, 0x09, 0x81, 0xf9, 0xf5, + 0x47, 0x9e, 0xb3, 0x2c, 0x69, 0x19, 0xd5, 0x8d, 0x03, + 0x5d, 0x24, 0xca, 0x90, 0xa6, 0x9d, 0x80, 0x2a, }, + .test[6].outlen = 48, .test[6].data = { 0x0e, 0x68, 0x17, 0x31, + 0x01, 0xa8, 0x28, 0x0a, 0x4e, 0x47, 0x22, 0xa6, 0x89, + 0xf0, 0xc6, 0xcd, 0x4e, 0x8c, 0x19, 0x4c, 0x44, 0x3d, + 0xb5, 0xa5, 0xf9, 0xfe, 0xea, 0xc7, 0x84, 0x0b, 0x57, + 0x0d, 0xd4, 0xe4, 0x8a, 0x3f, 0x68, 0x31, 0x20, 0xd9, + 0x1f, 0xc4, 0xa3, 0x76, 0xcf, 0xdd, 
0x07, 0xa6, }, + }, + /* SHA512_HASH */ + { .min_version = 0x65, + .test[1].outlen = 64, .test[1].data = { 0x50, 0x46, 0xad, 0xc1, + 0xdb, 0xa8, 0x38, 0x86, 0x7b, 0x2b, 0xbb, 0xfd, 0xd0, + 0xc3, 0x42, 0x3e, 0x58, 0xb5, 0x79, 0x70, 0xb5, 0x26, + 0x7a, 0x90, 0xf5, 0x79, 0x60, 0x92, 0x4a, 0x87, 0xf1, + 0x96, 0x0a, 0x6a, 0x85, 0xea, 0xa6, 0x42, 0xda, 0xc8, + 0x35, 0x42, 0x4b, 0x5d, 0x7c, 0x8d, 0x63, 0x7c, 0x00, + 0x40, 0x8c, 0x7a, 0x73, 0xda, 0x67, 0x2b, 0x7f, 0x49, + 0x85, 0x21, 0x42, 0x0b, 0x6d, 0xd3, } + }, + { .min_version = 0x65, + .test[1].outlen = 64, .test[1].data = { 0xec, 0xfd, 0x83, 0x74, + 0xc8, 0xa9, 0x2f, 0xd7, 0x71, 0x94, 0xd1, 0x1e, 0xe7, + 0x0f, 0x0f, 0x5e, 0x11, 0x29, 0x58, 0xb8, 0x36, 0xc6, + 0x39, 0xbc, 0xd6, 0x88, 0x6e, 0xdb, 0xc8, 0x06, 0x09, + 0x30, 0x27, 0xaa, 0x69, 0xb9, 0x2a, 0xd4, 0x67, 0x06, + 0x5c, 0x82, 0x8e, 0x90, 0xe9, 0x3e, 0x55, 0x88, 0x7d, + 0xb2, 0x2b, 0x48, 0xa2, 0x28, 0x92, 0x6c, 0x0f, 0xf1, + 0x57, 0xb5, 0xd0, 0x06, 0x1d, 0xf3, }, + .test[4].outlen = 64, .test[4].data = { 0x47, 0x88, 0x91, 0xe9, + 0x12, 0x3e, 0xfd, 0xdc, 0x26, 0x29, 0x08, 0xd6, 0x30, + 0x8f, 0xcc, 0xb6, 0x93, 0x30, 0x58, 0x69, 0x4e, 0x81, + 0xee, 0x9d, 0xb6, 0x0f, 0xc5, 0x54, 0xe6, 0x7c, 0x84, + 0xc5, 0xbc, 0x89, 0x99, 0xf0, 0xf3, 0x7f, 0x6f, 0x3f, + 0xf5, 0x04, 0x2c, 0xdf, 0x76, 0x72, 0x6a, 0xbe, 0x28, + 0x3b, 0xb8, 0x05, 0xb3, 0x47, 0x45, 0xf5, 0x7f, 0xb1, + 0x21, 0x2d, 0xe0, 0x8d, 0x1e, 0x29, }, + .test[5].outlen = 64, .test[5].data = { 0x7e, 0x55, 0xda, 0x88, + 0x28, 0xc1, 0x6e, 0x9a, 0x6a, 0x99, 0xa0, 0x37, 0x68, + 0xf0, 0x28, 0x5e, 0xe2, 0xbe, 0x00, 0xac, 0x76, 0x89, + 0x76, 0xcc, 0x5d, 0x98, 0x1b, 0x32, 0x1a, 0x14, 0xc4, + 0x2e, 0x9c, 0xe4, 0xf3, 0x3f, 0x5f, 0xa0, 0xae, 0x95, + 0x16, 0x0b, 0x14, 0xf5, 0xf5, 0x45, 0x29, 0xd8, 0xc9, + 0x43, 0xf2, 0xa9, 0xbc, 0xdc, 0x03, 0x81, 0x0d, 0x36, + 0x2f, 0xb1, 0x22, 0xe8, 0x13, 0xf8, }, + .test[6].outlen = 64, .test[6].data = { 0x5d, 0xc4, 0x80, 0x90, + 0x6b, 0x00, 0x17, 0x04, 0x34, 0x63, 0x93, 0xf1, 0xad, + 0x9a, 
0x3e, 0x13, 0x37, 0x6b, 0x86, 0xd7, 0xc4, 0x2b, + 0x22, 0x9c, 0x2e, 0xf2, 0x1d, 0xde, 0x35, 0x39, 0x03, + 0x3f, 0x2b, 0x3a, 0xc3, 0x49, 0xb3, 0x32, 0x86, 0x63, + 0x6b, 0x0f, 0x27, 0x95, 0x97, 0xe5, 0xe7, 0x2b, 0x9b, + 0x80, 0xea, 0x94, 0x4d, 0x84, 0x2e, 0x39, 0x44, 0x8f, + 0x56, 0xe3, 0xcd, 0xa7, 0x12, 0x3e, }, + }, + /* SHA512_224_HASH */ + { .min_version = 0x65, + .test[1].outlen = 28, .test[1].data = { 0x9e, 0x7d, 0x60, 0x80, + 0xde, 0xf4, 0xe1, 0xcc, 0xf4, 0xae, 0xaa, 0xc6, 0xf7, + 0xfa, 0xd0, 0x08, 0xd0, 0x60, 0xa6, 0xcf, 0x87, 0x06, + 0x20, 0x38, 0xd6, 0x16, 0x67, 0x74, } + }, + { .min_version = 0x65, + .test[1].outlen = 28, .test[1].data = { 0xff, 0xfb, 0x43, 0x27, + 0xdd, 0x2e, 0x39, 0xa0, 0x18, 0xa8, 0xaf, 0xde, 0x84, + 0x0b, 0x5d, 0x0f, 0x3d, 0xdc, 0xc6, 0x17, 0xd1, 0xb6, + 0x2f, 0x8c, 0xf8, 0x7e, 0x34, 0x34, }, + .test[4].outlen = 28, .test[4].data = { 0x00, 0x19, 0xe2, 0x2d, + 0x44, 0x80, 0x2d, 0xd8, 0x1c, 0x57, 0xf5, 0x57, 0x92, + 0x08, 0x13, 0xe7, 0x9d, 0xbb, 0x2b, 0xc2, 0x8d, 0x77, + 0xc1, 0xff, 0x71, 0x4c, 0xf0, 0xa9, }, + .test[5].outlen = 28, .test[5].data = { 0x6a, 0xc4, 0xa8, 0x73, + 0x21, 0x54, 0xb2, 0x82, 0xee, 0x89, 0x8d, 0x45, 0xd4, + 0xe3, 0x76, 0x3e, 0x04, 0x03, 0xc9, 0x71, 0xee, 0x01, + 0x25, 0xd2, 0x7b, 0xa1, 0x20, 0xc4, }, + .test[6].outlen = 28, .test[6].data = { 0x0f, 0x98, 0x15, 0x9b, + 0x11, 0xca, 0x60, 0xc7, 0x82, 0x39, 0x1a, 0x50, 0x8c, + 0xe4, 0x79, 0xfa, 0xa8, 0x0e, 0xc7, 0x12, 0xfd, 0x8c, + 0x9c, 0x99, 0x7a, 0xe8, 0x7e, 0x92, }, + }, + /* SHA512_256_HASH*/ + { .min_version = 0x65, + .test[1].outlen = 32, .test[1].data = { 0xaf, 0x13, 0xc0, 0x48, + 0x99, 0x12, 0x24, 0xa5, 0xe4, 0xc6, 0x64, 0x44, 0x6b, + 0x68, 0x8a, 0xaf, 0x48, 0xfb, 0x54, 0x56, 0xdb, 0x36, + 0x29, 0x60, 0x1b, 0x00, 0xec, 0x16, 0x0c, 0x74, 0xe5, + 0x54, } + }, + { .min_version = 0x65, + .test[1].outlen = 32, .test[1].data = { 0x3a, 0x2c, 0xd0, 0x2b, + 0xfa, 0xa6, 0x72, 0xe4, 0xf1, 0xab, 0x0a, 0x3e, 0x70, + 0xe4, 0x88, 0x1a, 0x92, 0xe1, 0x3b, 0x64, 0x5a, 0x9b, + 
0xed, 0xb3, 0x97, 0xc0, 0x17, 0x1f, 0xd4, 0x05, 0xf1, + 0x72, }, + .test[4].outlen = 32, .test[4].data = { 0x6f, 0x2d, 0xae, 0xc6, + 0xe4, 0xa6, 0x5b, 0x52, 0x0f, 0x26, 0x16, 0xf6, 0xa9, + 0xc1, 0x23, 0xc2, 0xb3, 0x67, 0xfc, 0x69, 0xac, 0x73, + 0x87, 0xa2, 0x5b, 0x6c, 0x44, 0xad, 0xc5, 0x26, 0x2b, + 0x10, }, + .test[5].outlen = 32, .test[5].data = { 0x63, 0xe7, 0xb8, 0xd1, + 0x76, 0x33, 0x56, 0x29, 0xba, 0x99, 0x86, 0x42, 0x0d, + 0x4f, 0xf7, 0x54, 0x8c, 0xb9, 0x39, 0xf2, 0x72, 0x1d, + 0x0e, 0x9d, 0x80, 0x67, 0xd9, 0xab, 0x15, 0xb0, 0x68, + 0x18, }, + .test[6].outlen = 32, .test[6].data = { 0x64, 0x78, 0x56, 0xd7, + 0xaf, 0x5b, 0x56, 0x08, 0xf1, 0x44, 0xf7, 0x4f, 0xa1, + 0xa1, 0x13, 0x79, 0x6c, 0xb1, 0x31, 0x11, 0xf3, 0x75, + 0xf4, 0x8c, 0xb4, 0x9f, 0xbf, 0xb1, 0x60, 0x38, 0x3d, + 0x28, }, + }, + + /* AESXCBC*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0x35, 0xd9, 0xdc, 0xdb, + 0x82, 0x9f, 0xec, 0x33, 0x52, 0xe7, 0xbf, 0x10, 0xb8, + 0x4b, 0xe4, 0xa5, }, + .test[3].outlen = 16, .test[3].data = { 0x39, 0x6f, 0x99, 0xb5, + 0x43, 0x33, 0x67, 0x4e, 0xd4, 0x45, 0x8f, 0x80, 0x77, + 0xe4, 0xd4, 0x14, }, + .test[4].outlen = 16, .test[4].data = { 0x73, 0xd4, 0x7c, 0x38, + 0x37, 0x4f, 0x73, 0xd0, 0x78, 0xa8, 0xc6, 0xec, 0x05, + 0x67, 0xca, 0x5e, }, + }, + + /* AESCMAC*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0x15, 0xbe, 0x1b, 0xfd, + 0x8c, 0xbb, 0xaf, 0x8b, 0x51, 0x9a, 0x64, 0x3b, 0x1b, + 0x46, 0xc1, 0x8f, }, + .test[3].outlen = 16, .test[3].data = { 0x4e, 0x02, 0xd6, 0xec, + 0x92, 0x75, 0x88, 0xb4, 0x3e, 0x83, 0xa7, 0xac, 0x32, + 0xb6, 0x2b, 0xdb, }, + .test[4].outlen = 16, .test[4].data = { 0xa7, 0x37, 0x01, 0xbe, + 0xe8, 0xce, 0xed, 0x44, 0x49, 0x4a, 0xbb, 0xf6, 0x9e, + 0xd9, 0x31, 0x3e, }, + }, + + /* KASUMIF9*/ + { .min_version = 0x65, + .test[1].outlen = 4, .test[1].data = { 0x5b, 0x26, 0x81, 0x06 + } + }, + + /* SNOW3G UIA2*/ + { .min_version = 0x65, + .test[1].outlen = 4, .test[1].data = { 0x08, 0xed, 0x2c, 0x76, + } 
+ }, + + /* ZUC UIA3*/ + { .min_version = 0x65, + .test[1].outlen = 4, .test[1].data = { 0x6a, 0x2b, 0x4c, 0x3a, + } + }, + + /* POLY1305*/ + { .min_version = 0x65, + .test[4].outlen = 16, .test[4].data = { 0xef, 0x91, 0x06, 0x4e, + 0xce, 0x99, 0x9c, 0x4e, 0xfd, 0x05, 0x6a, 0x8c, 0xe6, + 0x18, 0x23, 0xad } + }, + + /* SSLMAC MD5*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0x0e, 0xf4, 0xca, 0x32, + 0x32, 0x40, 0x1d, 0x1b, 0xaa, 0xfd, 0x6d, 0xa8, 0x01, + 0x79, 0xed, 0xcd, }, + }, + + /* SSLMAC_SHA1*/ + { .min_version = 0x65, + .test[2].outlen = 20, .test[2].data = { 0x05, 0x9d, 0x99, 0xb4, + 0xf3, 0x03, 0x1e, 0xc5, 0x24, 0xbf, 0xec, 0xdf, 0x64, + 0x8e, 0x37, 0x2e, 0xf0, 0xef, 0x93, 0xa0, }, + }, + + /* CRC32*/ + { .min_version = 0x65, + .test[0].outlen = 0 + }, + + /* TKIP-MIC*/ + { .min_version = 0x65, + .test[0].outlen = 8, .test[0].data = { 0x16, 0xfb, 0xa0, + 0x0e, 0xe2, 0xab, 0x6c, 0x97, } + }, + + /* SHA3-224*/ + { .min_version = 0x65, + .test[1].outlen = 28, .test[1].data = { 0x73, 0xe0, 0x87, + 0xae, 0x12, 0x71, 0xb2, 0xc5, 0xf6, 0x85, 0x46, 0xc9, + 0x3a, 0xb4, 0x25, 0x14, 0xa6, 0x9e, 0xef, 0x25, 0x2b, + 0xfd, 0xd1, 0x37, 0x55, 0x74, 0x8a, 0x00, } + }, + + /* SHA3-256*/ + { .min_version = 0x65, + .test[1].outlen = 32, .test[1].data = { 0x9e, 0x62, 0x91, 0x97, + 0x0c, 0xb4, 0x4d, 0xd9, 0x40, 0x08, 0xc7, 0x9b, 0xca, + 0xf9, 0xd8, 0x6f, 0x18, 0xb4, 0xb4, 0x9b, 0xa5, 0xb2, + 0xa0, 0x47, 0x81, 0xdb, 0x71, 0x99, 0xed, 0x3b, 0x9e, + 0x4e, } + }, + + /* SHA3-384*/ + { .min_version = 0x65, + .test[1].outlen = 48, .test[1].data = { 0x4b, 0xda, 0xab, + 0xf7, 0x88, 0xd3, 0xad, 0x1a, 0xd8, 0x3d, 0x6d, 0x93, + 0xc7, 0xe4, 0x49, 0x37, 0xc2, 0xe6, 0x49, 0x6a, 0xf2, + 0x3b, 0xe3, 0x35, 0x4d, 0x75, 0x69, 0x87, 0xf4, 0x51, + 0x60, 0xfc, 0x40, 0x23, 0xbd, 0xa9, 0x5e, 0xcd, 0xcb, + 0x3c, 0x7e, 0x31, 0xa6, 0x2f, 0x72, 0x6d, 0x70, 0x2c, + } + }, + + /* SHA3-512*/ + { .min_version = 0x65, + .test[1].outlen = 64, .test[1].data = { 0xad, 0x56, 0xc3, 0x5c, + 
0xab, 0x50, 0x63, 0xb9, 0xe7, 0xea, 0x56, 0x83, 0x14, + 0xec, 0x81, 0xc4, 0x0b, 0xa5, 0x77, 0xaa, 0xe6, 0x30, + 0xde, 0x90, 0x20, 0x04, 0x00, 0x9e, 0x88, 0xf1, 0x8d, + 0xa5, 0x7b, 0xbd, 0xfd, 0xaa, 0xa0, 0xfc, 0x18, 0x9c, + 0x66, 0xc8, 0xd8, 0x53, 0x24, 0x8b, 0x6b, 0x11, 0x88, + 0x44, 0xd5, 0x3f, 0x7d, 0x0b, 0xa1, 0x1d, 0xe0, 0xf3, + 0xbf, 0xaf, 0x4c, 0xdd, 0x9b, 0x3f, } + }, + + /* SHAKE128*/ + { .min_version = 0x65, + .test[4].outlen = 16, .test[4].data = { 0x24, 0xa7, 0xca, + 0x4b, 0x75, 0xe3, 0x89, 0x8d, 0x4f, 0x12, 0xe7, 0x4d, + 0xea, 0x8c, 0xbb, 0x65 } + }, + + /* SHAKE256*/ + { .min_version = 0x65, + .test[4].outlen = 32, .test[4].data = { 0xf5, 0x97, 0x7c, + 0x82, 0x83, 0x54, 0x6a, 0x63, 0x72, 0x3b, 0xc3, 0x1d, + 0x26, 0x19, 0x12, 0x4f, + 0x11, 0xdb, 0x46, 0x58, 0x64, 0x33, 0x36, 0x74, 0x1d, + 0xf8, 0x17, 0x57, 0xd5, 0xad, 0x30, 0x62 } + }, + + /* CSHAKE128*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0xe0, 0x6f, 0xd8, + 0x50, 0x57, 0x6f, 0xe4, 0xfa, 0x7e, 0x13, 0x42, 0xb5, + 0xf8, 0x13, 0xeb, 0x23 } + }, + + /* CSHAKE256*/ + { .min_version = 0x65, + .test[1].outlen = 32, .test[1].data = { 0xf3, 0xf2, 0xb5, + 0x47, 0xf2, 0x16, 0xba, 0x6f, 0x49, 0x83, 0x3e, 0xad, + 0x1e, 0x46, 0x85, 0x54, + 0xd0, 0xd7, 0xf9, 0xc6, 0x7e, 0xe9, 0x27, 0xc6, 0xc3, + 0xc3, 0xdb, 0x91, 0xdb, 0x97, 0x04, 0x0f } + }, + + /* KMAC128*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0x6c, 0x3f, 0x29, + 0xfe, 0x01, 0x96, 0x59, 0x36, 0xb7, 0xae, 0xb7, 0xff, + 0x71, 0xe0, 0x3d, 0xff }, + .test[4].outlen = 16, .test[4].data = { 0x58, 0xd9, 0x8d, + 0xe8, 0x1f, 0x64, 0xb4, 0xa3, 0x9f, 0x63, 0xaf, 0x21, + 0x99, 0x03, 0x97, 0x06 }, + .test[5].outlen = 16, .test[5].data = { 0xf8, 0xf9, 0xb7, + 0xa4, 0x05, 0x3d, 0x90, 0x7c, 0xf2, 0xa1, 0x7c, 0x34, + 0x39, 0xc2, 0x87, 0x4b }, + .test[6].outlen = 16, .test[6].data = { 0xef, 0x4a, 0xd5, + 0x1d, 0xd7, 0x83, 0x56, 0xd3, 0xa8, 0x3c, 0xf5, 0xf8, + 0xd1, 0x12, 0xf4, 0x44 } + }, + + /* KMAC256*/ + { 
.min_version = 0x65, + .test[1].outlen = 32, .test[1].data = { 0x0d, 0x86, 0xfa, + 0x92, 0x92, 0xe4, 0x77, 0x24, 0x6a, 0xcc, 0x79, 0xa0, + 0x1e, 0xb4, 0xc3, 0xac, + 0xfc, 0x56, 0xbc, 0x63, 0xcc, 0x1b, 0x6e, 0xf6, 0xc8, + 0x99, 0xa5, 0x3a, 0x38, 0x14, 0xa2, 0x40 }, + .test[4].outlen = 32, .test[4].data = { 0xad, 0x99, 0xed, + 0x20, 0x1f, 0xbe, 0x45, 0x07, 0x3d, 0xf4, 0xae, 0x9f, + 0xc2, 0xd8, 0x06, 0x18, + 0x31, 0x4e, 0x8c, 0xb6, 0x33, 0xe8, 0x31, 0x36, 0x00, + 0xdd, 0x42, 0x20, 0xda, 0x2b, 0xd5, 0x2b }, + .test[5].outlen = 32, .test[5].data = { 0xf9, 0xc6, 0x2b, + 0x17, 0xa0, 0x04, 0xd9, 0xf2, 0x6c, 0xbf, 0x5d, 0xa5, + 0x9a, 0xd7, 0x36, 0x1d, + 0xad, 0x66, 0x6b, 0x3d, 0xb1, 0x52, 0xd3, 0x81, 0x39, + 0x20, 0xd4, 0xf0, 0x43, 0x72, 0x2c, 0xb7 }, + .test[6].outlen = 32, .test[6].data = { 0xcc, 0x89, 0xe4, + 0x05, 0x58, 0x77, 0x38, 0x8b, 0x18, 0xa0, 0x7c, 0x8d, + 0x20, 0x99, 0xea, 0x6e, + 0x6b, 0xe9, 0xf7, 0x0c, 0xe1, 0xe5, 0xce, 0xbc, 0x55, + 0x4c, 0x80, 0xa5, 0xdc, 0xae, 0xf7, 0x94 } + }, + + /* KMAC128XOF*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0x84, 0x07, 0x89, + 0x29, 0xa7, 0xf4, 0x98, 0x91, 0xf5, 0x64, 0x61, 0x8d, + 0xa5, 0x93, 0x00, 0x31 }, + .test[4].outlen = 16, .test[4].data = { 0xf0, 0xa4, 0x1b, + 0x98, 0x0f, 0xb3, 0xf2, 0xbd, 0xc3, 0xfc, 0x64, 0x1f, + 0x73, 0x1f, 0xd4, 0x74 }, + .test[5].outlen = 16, .test[5].data = { 0xa5, 0xc5, 0xad, + 0x25, 0x59, 0xf1, 0x5d, 0xea, 0x5b, 0x18, 0x0a, 0x52, + 0xce, 0x6c, 0xc0, 0x88 }, + .test[6].outlen = 16, .test[6].data = { 0x1a, 0x81, 0xdd, + 0x81, 0x47, 0x89, 0xf4, 0x15, 0xcc, 0x18, 0x05, 0x81, + 0xe3, 0x95, 0x21, 0xc3 } + }, + + /* KMAC256XOF*/ + { .min_version = 0x65, + .test[1].outlen = 32, .test[1].data = { 0xff, 0x85, 0xe9, + 0x61, 0x67, 0x96, 0x35, 0x58, 0x33, 0x38, 0x2c, 0xe8, + 0x25, 0x77, 0xbe, 0x63, + 0xd5, 0x2c, 0xa7, 0xef, 0xce, 0x9b, 0x63, 0x71, 0xb2, + 0x09, 0x7c, 0xd8, 0x60, 0x4e, 0x5a, 0xfa }, + .test[4].outlen = 32, .test[4].data = { 0x86, 0x89, 0xc2, + 0x4a, 0xe8, 0x18, 
0x46, 0x10, 0x6b, 0xf2, 0x09, 0xd7, + 0x37, 0x83, 0xab, 0x77, + 0xb5, 0xce, 0x7c, 0x96, 0x9c, 0xfa, 0x0f, 0xa0, 0xd8, + 0xde, 0xb5, 0xb7, 0xc6, 0xcd, 0xa9, 0x8f }, + .test[5].outlen = 32, .test[5].data = { 0x4d, 0x71, 0x81, + 0x5a, 0x5f, 0xac, 0x3b, 0x29, 0xf2, 0x5f, 0xb6, 0x56, + 0xf1, 0x76, 0xcf, 0xdc, + 0x51, 0x56, 0xd7, 0x3c, 0x47, 0xec, 0x6d, 0xea, 0xc6, + 0x3e, 0x54, 0xe7, 0x6f, 0xdc, 0xe8, 0x39 }, + .test[6].outlen = 32, .test[6].data = { 0x5f, 0xc5, 0xe1, + 0x1e, 0xe7, 0x55, 0x0f, 0x62, 0x71, 0x29, 0xf3, 0x0a, + 0xb3, 0x30, 0x68, 0x06, + 0xea, 0xec, 0xe4, 0x37, 0x17, 0x37, 0x2d, 0x5d, 0x64, + 0x09, 0x70, 0x63, 0x94, 0x80, 0x9b, 0x80 } + }, + + /* HASH SM3*/ + { .min_version = 0x65, + .test[1].outlen = 32, .test[1].data = { 0xe0, 0xba, 0xb8, + 0xf4, 0xd8, 0x17, 0x2b, 0xa2, 0x45, 0x19, 0x0d, 0x13, + 0xc9, 0x41, 0x17, 0xe9, + 0x3b, 0x82, 0x16, 0x6c, 0x25, 0xb2, 0xb6, 0x98, 0x83, + 0x35, 0x0c, 0x19, 0x2c, 0x90, 0x51, 0x40 }, + .test[4].outlen = 32, .test[4].data = { 0xe0, 0xba, 0xb8, + 0xf4, 0xd8, 0x17, 0x2b, 0xa2, 0x45, 0x19, 0x0d, 0x13, + 0xc9, 0x41, 0x17, 0xe9, + 0x3b, 0x82, 0x16, 0x6c, 0x25, 0xb2, 0xb6, 0x98, 0x83, + 0x35, 0x0c, 0x19, 0x2c, 0x90, 0x51, 0x40 }, + .test[5].outlen = 32, .test[5].data = { 0xe0, 0xba, 0xb8, + 0xf4, 0xd8, 0x17, 0x2b, 0xa2, 0x45, 0x19, 0x0d, 0x13, + 0xc9, 0x41, 0x17, 0xe9, + 0x3b, 0x82, 0x16, 0x6c, 0x25, 0xb2, 0xb6, 0x98, 0x83, + 0x35, 0x0c, 0x19, 0x2c, 0x90, 0x51, 0x40 }, + .test[6].outlen = 32, .test[6].data = { 0xe0, 0xba, 0xb8, + 0xf4, 0xd8, 0x17, 0x2b, 0xa2, 0x45, 0x19, 0x0d, 0x13, + 0xc9, 0x41, 0x17, 0xe9, + 0x3b, 0x82, 0x16, 0x6c, 0x25, 0xb2, 0xb6, 0x98, 0x83, + 0x35, 0x0c, 0x19, 0x2c, 0x90, 0x51, 0x40 } + }, + + /* HMAC SM3*/ + { .min_version = 0x65, + .test[1].outlen = 32, .test[1].data = { 0x68, 0xf0, 0x65, + 0xd8, 0xd8, 0xc9, 0xc2, 0x0e, 0x10, 0xfd, 0x52, 0x7c, + 0xf2, 0xd7, 0x42, 0xd3, + 0x08, 0x44, 0x22, 0xbc, 0xf0, 0x9d, 0xcc, 0x34, 0x7b, + 0x76, 0x13, 0x91, 0xba, 0xce, 0x4d, 0x17 }, + .test[4].outlen = 32, 
.test[4].data = { 0xd8, 0xab, 0x2a, + 0x7b, 0x56, 0x21, 0xb1, 0x59, 0x64, 0xb2, 0xa3, 0xd6, + 0x72, 0xb3, 0x95, 0x81, + 0xa0, 0xcd, 0x96, 0x47, 0xf0, 0xbc, 0x8c, 0x16, 0x5b, + 0x9b, 0x7d, 0x2f, 0x71, 0x3f, 0x23, 0x19}, + .test[5].outlen = 32, .test[5].data = { 0xa0, 0xd1, 0xd5, + 0xa0, 0x9e, 0x4c, 0xca, 0x8c, 0x7b, 0xe0, 0x8f, 0x70, + 0x92, 0x2e, 0x3f, 0x4c, + 0xa0, 0xca, 0xef, 0xa1, 0x86, 0x9d, 0xb2, 0xe1, 0xc5, + 0xfa, 0x9d, 0xfa, 0xbc, 0x11, 0xcb, 0x1f }, + .test[6].outlen = 32, .test[6].data = { 0xa0, 0xd1, 0xd5, + 0xa0, 0x9e, 0x4c, 0xca, 0x8c, 0x7b, 0xe0, 0x8f, 0x70, + 0x92, 0x2e, 0x3f, 0x4c, + 0xa0, 0xca, 0xef, 0xa1, 0x86, 0x9d, 0xb2, 0xe1, 0xc5, + 0xfa, 0x9d, 0xfa, 0xbc, 0x11, 0xcb, 0x1f} + }, + + /* MAC_SM4_XCBC*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0x69, 0xaf, 0x45, + 0xe6, 0x0c, 0x78, 0x71, 0x7e, 0x44, 0x6c, 0xfe, 0x68, + 0xd4, 0xfe, 0x20, 0x8b }, + .test[4].outlen = 16, .test[4].data = { 0x69, 0xaf, 0x45, + 0xe6, 0x0c, 0x78, 0x71, 0x7e, 0x44, 0x6c, 0xfe, 0x68, + 0xd4, 0xfe, 0x20, 0x8b }, + .test[5].outlen = 16, .test[5].data = { 0x69, 0xaf, 0x45, + 0xe6, 0x0c, 0x78, 0x71, 0x7e, 0x44, 0x6c, 0xfe, 0x68, + 0xd4, 0xfe, 0x20, 0x8b }, + .test[6].outlen = 16, .test[6].data = { 0x69, 0xaf, 0x45, + 0xe6, 0x0c, 0x78, 0x71, 0x7e, 0x44, 0x6c, 0xfe, 0x68, + 0xd4, 0xfe, 0x20, 0x8b } + }, + + /* MAC_SM4_CMAC*/ + { .min_version = 0x65, + .test[1].outlen = 16, .test[1].data = { 0x36, 0xbe, 0xec, + 0x03, 0x9c, 0xc7, 0x0c, 0x28, 0x23, 0xdd, 0x71, 0x8b, + 0x3c, 0xbd, 0x7f, 0x37 }, + .test[4].outlen = 16, .test[4].data = { 0x36, 0xbe, 0xec, + 0x03, 0x9c, 0xc7, 0x0c, 0x28, 0x23, 0xdd, 0x71, 0x8b, + 0x3c, 0xbd, 0x7f, 0x37 }, + .test[5].outlen = 16, .test[5].data = { 0x36, 0xbe, 0xec, + 0x03, 0x9c, 0xc7, 0x0c, 0x28, 0x23, 0xdd, 0x71, 0x8b, + 0x3c, 0xbd, 0x7f, 0x37 }, + .test[6].outlen = 16, .test[6].data = { 0x36, 0xbe, 0xec, + 0x03, 0x9c, 0xc7, 0x0c, 0x28, 0x23, 0xdd, 0x71, 0x8b, + 0x3c, 0xbd, 0x7f, 0x37 } + }, + +}; +#endif + int 
spacc_sg_to_ddt(struct device *dev, struct scatterlist *sg, int nbytes, struct pdu_ddt *ddt, int dma_direction) { @@ -449,6 +1324,203 @@ int spacc_close(struct spacc_device *dev, int handle) return spacc_job_release(dev, handle); } +#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AUTODETECT) +static int spacc_set_auxinfo(struct spacc_device *spacc, int jobid, + uint32_t direction, uint32_t bitsize) +{ + int ret = CRYPTO_OK; + struct spacc_job *job; + + if (jobid < 0 || jobid > SPACC_MAX_JOBS) + return -ENXIO; + + job = &spacc->job[jobid]; + if (!job) { + ret = -EIO; + } else { + job->auxinfo_dir = direction; + job->auxinfo_bit_align = bitsize; + } + + return ret; +} + +static void check_modes(struct spacc_device *spacc, int x, int y, void *virt, + char *key, struct pdu_ddt *ddt) +{ + int proclen, aadlen, ivsize, h, err, enc, hash; + + if (template[x] & (1 << y)) { + /* testing keysizes[y] with algo 'x' which + * should match the ENUMs above + */ + + if (template[x] & 128) { + enc = 0; + hash = x; + } else { + enc = x; + hash = 0; + } + + h = spacc_open(spacc, enc, hash, -1, 0, NULL, NULL); + if (h < 0) { + spacc->config.modes[x] &= ~(1 << y); + return; + } + + spacc_set_operation(spacc, h, OP_ENCRYPT, 0, IP_ICV_APPEND, 0, + 0, 0); + + /* if this is a hash or mac*/ + if (template[x] & 128) { + switch (x) { + case CRYPTO_MODE_HASH_CSHAKE128: + case CRYPTO_MODE_HASH_CSHAKE256: + case CRYPTO_MODE_MAC_KMAC128: + case CRYPTO_MODE_MAC_KMAC256: + case CRYPTO_MODE_MAC_KMACXOF128: + case CRYPTO_MODE_MAC_KMACXOF256: + /* special initial bytes to encode + * length for cust strings + */ + key[0] = 0x01; + key[1] = 0x70; + break; + } + + spacc_write_context(spacc, h, SPACC_HASH_OPERATION, + key, keysizes[1][y] + + (x == CRYPTO_MODE_MAC_XCBC ? 
32 : 0), + key, 16); + } else { + u32 keysize; + + ivsize = 16; + keysize = keysizes[0][y]; + switch (x) { + case CRYPTO_MODE_CHACHA20_STREAM: + case CRYPTO_MODE_AES_CCM: + case CRYPTO_MODE_SM4_CCM: + ivsize = 16; + break; + case CRYPTO_MODE_SM4_GCM: + case CRYPTO_MODE_CHACHA20_POLY1305: + case CRYPTO_MODE_AES_GCM: + ivsize = 12; + break; + case CRYPTO_MODE_KASUMI_ECB: + case CRYPTO_MODE_KASUMI_F8: + case CRYPTO_MODE_3DES_CBC: + case CRYPTO_MODE_3DES_ECB: + case CRYPTO_MODE_DES_CBC: + case CRYPTO_MODE_DES_ECB: + ivsize = 8; + break; + case CRYPTO_MODE_SM4_XTS: + case CRYPTO_MODE_AES_XTS: + keysize <<= 1; + break; + } + spacc_write_context(spacc, h, SPACC_CRYPTO_OPERATION, + key, keysize, key, ivsize); + } + + spacc_set_key_exp(spacc, h); + + switch (x) { + case CRYPTO_MODE_ZUC_UEA3: + case CRYPTO_MODE_SNOW3G_UEA2: + case CRYPTO_MODE_MAC_SNOW3G_UIA2: + case CRYPTO_MODE_MAC_ZUC_UIA3: + case CRYPTO_MODE_KASUMI_F8: + spacc_set_auxinfo(spacc, h, 0, 0); + break; + case CRYPTO_MODE_MAC_KASUMI_F9: + spacc_set_auxinfo(spacc, h, 0, 8); + break; + } + + memset(virt, 0, 256); + + /* 16AAD/16PT or 32AAD/0PT depending on + * whether we're in a hash or not mode + */ + aadlen = 16; + proclen = 32; + if (!enc) + aadlen += 16; + + switch (x) { + case CRYPTO_MODE_SM4_CS1: + case CRYPTO_MODE_SM4_CS2: + case CRYPTO_MODE_SM4_CS3: + case CRYPTO_MODE_AES_CS1: + case CRYPTO_MODE_AES_CS2: + case CRYPTO_MODE_AES_CS3: + proclen = 31; + fallthrough; + case CRYPTO_MODE_SM4_XTS: + case CRYPTO_MODE_AES_XTS: + aadlen = 0; + } + + err = spacc_packet_enqueue_ddt(spacc, h, ddt, ddt, proclen, 0, + aadlen, 0, 0, 0); + if (err == CRYPTO_OK) { + do { + err = spacc_packet_dequeue(spacc, h); + } while (err == -EINPROGRESS); + } + if (err != CRYPTO_OK || !testdata[x].test[y].outlen || + memcmp(testdata[x].test[y].data, virt, + testdata[x].test[y].outlen)) { + spacc->config.modes[x] &= ~(1 << y); + } + spacc_close(spacc, h); + } +} + +int spacc_autodetect(struct spacc_device *spacc) +{ + struct pdu_ddt ddt; 
+ dma_addr_t dma; + void *virt; + int x, y; + unsigned char key[64]; + + /* allocate DMA memory ...*/ + virt = dma_alloc_coherent(get_ddt_device(), 256, &dma, GFP_KERNEL); + if (!virt) + return -2; + + if (pdu_ddt_init(&ddt, 1)) { + dma_free_coherent(get_ddt_device(), 256, virt, dma); + return -3; + } + + pdu_ddt_add(&ddt, dma, 256); + + for (x = 0; x < 64; x++) + key[x] = x; + + for (x = 0; x < ARRAY_SIZE(template); x++) { + spacc->config.modes[x] = template[x]; + if (template[x] && spacc->config.version >= + testdata[x].min_version) { + for (y = 0; y < (ARRAY_SIZE(keysizes[0])); y++) + check_modes(spacc, x, y, virt, key, &ddt); + } + } + + pdu_ddt_free(&ddt); + dma_free_coherent(get_ddt_device(), 256, virt, dma); + + return 0; +} + +#else + static void spacc_static_modes(struct spacc_device *spacc, int x, int y) { /* Disable the algos that as not supported here */ @@ -486,7 +1558,7 @@ int spacc_static_config(struct spacc_device *spacc) return 0; } - +#endif int spacc_clone_handle(struct spacc_device *spacc, int old_handle, void *cbdata) { @@ -543,19 +1615,66 @@ int spacc_open(struct spacc_device *spacc, int enc, int hash, int ctxid, ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); break; + + case CRYPTO_MODE_AES_CS1: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); + job->auxinfo_cs_mode = 1; + break; + case CRYPTO_MODE_AES_CS2: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); + job->auxinfo_cs_mode = 2; + break; case CRYPTO_MODE_AES_CS3: ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); job->auxinfo_cs_mode = 3; break; + case CRYPTO_MODE_AES_CFB: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CFB); + break; + case CRYPTO_MODE_AES_OFB: + ctrl |= 
SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_OFB); + break; case CRYPTO_MODE_AES_CTR: ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CTR); break; + case CRYPTO_MODE_AES_CCM: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CCM); + break; + case CRYPTO_MODE_AES_GCM: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_GCM); + break; + case CRYPTO_MODE_AES_F8: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_F8); + break; case CRYPTO_MODE_AES_XTS: ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_XTS); break; + case CRYPTO_MODE_MULTI2_ECB: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_MULTI2); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); + break; + case CRYPTO_MODE_MULTI2_CBC: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_MULTI2); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); + break; + case CRYPTO_MODE_MULTI2_OFB: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_MULTI2); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_OFB); + break; + case CRYPTO_MODE_MULTI2_CFB: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_MULTI2); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CFB); + break; case CRYPTO_MODE_3DES_CBC: case CRYPTO_MODE_DES_CBC: ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_DES); @@ -566,10 +1685,34 @@ int spacc_open(struct spacc_device *spacc, int enc, int hash, int ctxid, ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_DES); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); break; + case CRYPTO_MODE_KASUMI_ECB: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_KASUMI); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); + break; + case CRYPTO_MODE_KASUMI_F8: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_KASUMI); + ctrl 
|= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_F8); + break; + case CRYPTO_MODE_SNOW3G_UEA2: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, + C_SNOW3G_UEA2); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); + break; + case CRYPTO_MODE_ZUC_UEA3: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, + C_ZUC_UEA3); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); + break; case CRYPTO_MODE_CHACHA20_STREAM: ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_CHACHA20); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CHACHA_STREAM); break; + case CRYPTO_MODE_CHACHA20_POLY1305: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, + C_CHACHA20); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, + CM_CHACHA_AEAD); + break; case CRYPTO_MODE_SM4_ECB: ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); @@ -578,15 +1721,45 @@ int spacc_open(struct spacc_device *spacc, int enc, int hash, int ctxid, ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); break; + case CRYPTO_MODE_SM4_CS1: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); + job->auxinfo_cs_mode = 1; + break; + case CRYPTO_MODE_SM4_CS2: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); + job->auxinfo_cs_mode = 2; + break; case CRYPTO_MODE_SM4_CS3: ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); job->auxinfo_cs_mode = 3; break; + case CRYPTO_MODE_SM4_CFB: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CFB); + break; + case CRYPTO_MODE_SM4_OFB: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_OFB); + break; case CRYPTO_MODE_SM4_CTR: ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CTR); break; + 
case CRYPTO_MODE_SM4_CCM: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CCM); + break; + case CRYPTO_MODE_SM4_GCM: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_GCM); + break; + case CRYPTO_MODE_SM4_F8: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_F8); + break; case CRYPTO_MODE_SM4_XTS: ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_XTS); @@ -595,6 +1768,216 @@ int spacc_open(struct spacc_device *spacc, int enc, int hash, int ctxid, ret = -EOPNOTSUPP; } + switch (hash) { + case CRYPTO_MODE_NULL: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_NULL); + break; + case CRYPTO_MODE_HMAC_SHA1: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA1); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); + break; + case CRYPTO_MODE_HMAC_MD5: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_MD5); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); + break; + case CRYPTO_MODE_HMAC_SHA224: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA224); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); + break; + case CRYPTO_MODE_HMAC_SHA256: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA256); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); + break; + case CRYPTO_MODE_HMAC_SHA384: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA384); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); + break; + case CRYPTO_MODE_HMAC_SHA512: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA512); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); + break; + case CRYPTO_MODE_HMAC_SHA512_224: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHA512_224); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); + break; + case CRYPTO_MODE_HMAC_SHA512_256: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHA512_256); + ctrl |= 
SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); + break; + case CRYPTO_MODE_SSLMAC_MD5: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_MD5); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, + HM_SSLMAC); + break; + case CRYPTO_MODE_SSLMAC_SHA1: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA1); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, + HM_SSLMAC); + break; + case CRYPTO_MODE_HASH_SHA1: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA1); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_MD5: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_MD5); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHA224: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA224); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHA256: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA256); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHA384: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA384); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHA512: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA512); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHA512_224: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHA512_224); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHA512_256: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHA512_256); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHA3_224: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHA3_224); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHA3_256: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHA3_256); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHA3_384: + ctrl |= 
SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHA3_384); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHA3_512: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHA3_512); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SHAKE128: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHAKE128); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, + HM_SHAKE_SHAKE); + break; + case CRYPTO_MODE_HASH_SHAKE256: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHAKE256); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, + HM_SHAKE_SHAKE); + break; + case CRYPTO_MODE_HASH_CSHAKE128: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHAKE128); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, + HM_SHAKE_CSHAKE); + break; + case CRYPTO_MODE_HASH_CSHAKE256: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHAKE256); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, + HM_SHAKE_CSHAKE); + break; + case CRYPTO_MODE_MAC_KMAC128: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHAKE128); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, + HM_SHAKE_KMAC); + break; + case CRYPTO_MODE_MAC_KMAC256: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHAKE256); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, + HM_SHAKE_KMAC); break; + case CRYPTO_MODE_MAC_KMACXOF128: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHAKE128); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, + HM_SHAKE_KMAC); + /* auxinfo_dir reused to indicate XOF */ + job->auxinfo_dir = 1; + break; + case CRYPTO_MODE_MAC_KMACXOF256: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SHAKE256); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, + HM_SHAKE_KMAC); + /* auxinfo_dir reused to indicate XOF */ + job->auxinfo_dir = 1; + break; + case CRYPTO_MODE_MAC_XCBC: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_XCBC); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_MAC_CMAC: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_CMAC); 
+ ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_MAC_KASUMI_F9: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_KF9); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_MAC_SNOW3G_UIA2: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SNOW3G_UIA2); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_MAC_ZUC_UIA3: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_ZUC_UIA3); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_MAC_POLY1305: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_POLY1305); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_CRC32: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_CRC32_I3E802_3); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_MAC_MICHAEL: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_MICHAEL); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HASH_SM3: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SM3); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_HMAC_SM3: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SM3); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); + break; + case CRYPTO_MODE_MAC_SM4_XCBC: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SM4_XCBC_MAC); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + case CRYPTO_MODE_MAC_SM4_CMAC: + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, + H_SM4_CMAC); + ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); + break; + default: + ret = -EOPNOTSUPP; + } ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_MSG_BEGIN) | SPACC_CTRL_MASK(SPACC_CTRL_MSG_END); From 9f1a7ab4d31ef30fbf8adb0985300049469f2270 Mon Sep 17 00:00:00 2001 From: Pavitrakumar M Date: Mon, 29 Jul 2024 09:43:47 +0530 Subject: [PATCH 13/96] crypto: spacc - Add SPAcc ahash support Signed-off-by: shwetar Signed-off-by: 
Pavitrakumar M Acked-by: Ruud Derwig Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_ahash.c | 914 +++++++++++++++++++++++++ 1 file changed, 914 insertions(+) create mode 100644 drivers/crypto/dwc-spacc/spacc_ahash.c diff --git a/drivers/crypto/dwc-spacc/spacc_ahash.c b/drivers/crypto/dwc-spacc/spacc_ahash.c new file mode 100644 index 000000000000..ed63855d4931 --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_ahash.c @@ -0,0 +1,914 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spacc_device.h" +#include "spacc_core.h" + +#define PPP_BUF_SIZE 128 + +struct sdesc { + struct shash_desc shash; + char ctx[]; +}; + +static struct dma_pool *spacc_hash_pool; +static LIST_HEAD(spacc_hash_alg_list); +static LIST_HEAD(head_sglbuf); +static DEFINE_MUTEX(spacc_hash_alg_mutex); + +static struct mode_tab possible_hashes[] = { + { .keylen[0] = 16, MODE_TAB_HASH("cmac(aes)", MAC_CMAC, 16, 16), + .sw_fb = true }, + { .keylen[0] = 48 | MODE_TAB_HASH_XCBC, MODE_TAB_HASH("xcbc(aes)", + MAC_XCBC, 16, 16), .sw_fb = true }, + + { MODE_TAB_HASH("cmac(sm4)", MAC_SM4_CMAC, 16, 16), .sw_fb = true }, + { .keylen[0] = 32 | MODE_TAB_HASH_XCBC, MODE_TAB_HASH("xcbc(sm4)", + MAC_SM4_XCBC, 16, 16), .sw_fb = true }, + + { MODE_TAB_HASH("hmac(md5)", HMAC_MD5, MD5_DIGEST_SIZE, + MD5_HMAC_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("md5", HASH_MD5, MD5_DIGEST_SIZE, + MD5_HMAC_BLOCK_SIZE), .sw_fb = true }, + + { MODE_TAB_HASH("hmac(sha1)", HMAC_SHA1, SHA1_DIGEST_SIZE, + SHA1_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("sha1", HASH_SHA1, SHA1_DIGEST_SIZE, + SHA1_BLOCK_SIZE), .sw_fb = true }, + + { MODE_TAB_HASH("sha224", HASH_SHA224, SHA224_DIGEST_SIZE, + SHA224_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("sha256", HASH_SHA256, SHA256_DIGEST_SIZE, + SHA256_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("sha384", HASH_SHA384, SHA384_DIGEST_SIZE, + 
SHA384_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("sha512", HASH_SHA512, SHA512_DIGEST_SIZE, + SHA512_BLOCK_SIZE), .sw_fb = true }, + + { MODE_TAB_HASH("hmac(sha512)", HMAC_SHA512, SHA512_DIGEST_SIZE, + SHA512_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("hmac(sha224)", HMAC_SHA224, SHA224_DIGEST_SIZE, + SHA224_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("hmac(sha256)", HMAC_SHA256, SHA256_DIGEST_SIZE, + SHA256_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("hmac(sha384)", HMAC_SHA384, SHA384_DIGEST_SIZE, + SHA384_BLOCK_SIZE), .sw_fb = true }, + + { MODE_TAB_HASH("sha3-224", HASH_SHA3_224, SHA3_224_DIGEST_SIZE, + SHA3_224_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("sha3-256", HASH_SHA3_256, SHA3_256_DIGEST_SIZE, + SHA3_256_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("sha3-384", HASH_SHA3_384, SHA3_384_DIGEST_SIZE, + SHA3_384_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("sha3-512", HASH_SHA3_512, SHA3_512_DIGEST_SIZE, + SHA3_512_BLOCK_SIZE), .sw_fb = true }, + + { MODE_TAB_HASH("hmac(sm3)", HMAC_SM3, SM3_DIGEST_SIZE, + SM3_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("sm3", HASH_SM3, SM3_DIGEST_SIZE, + SM3_BLOCK_SIZE), .sw_fb = true }, + { MODE_TAB_HASH("michael_mic", MAC_MICHAEL, 8, 8), .sw_fb = true }, +}; + +static void spacc_hash_cleanup_dma_dst(struct spacc_crypto_ctx *tctx, + struct ahash_request *req) +{ + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + + pdu_ddt_free(&ctx->dst); +} + +static void spacc_hash_cleanup_dma_src(struct spacc_crypto_ctx *tctx, + struct ahash_request *req) +{ + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + + if (tctx->tmp_sgl && tctx->tmp_sgl[0].length != 0) { + dma_unmap_sg(tctx->dev, tctx->tmp_sgl, ctx->src_nents, + DMA_TO_DEVICE); + kfree(tctx->tmp_sgl_buff); + tctx->tmp_sgl_buff = NULL; + tctx->tmp_sgl[0].length = 0; + } else { + dma_unmap_sg(tctx->dev, req->src, ctx->src_nents, + DMA_TO_DEVICE); + } + + pdu_ddt_free(&ctx->src); +} + +static void spacc_hash_cleanup_dma(struct 
device *dev, + struct ahash_request *req) +{ + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + + dma_unmap_sg(dev, req->src, ctx->src_nents, DMA_TO_DEVICE); + pdu_ddt_free(&ctx->src); + + dma_pool_free(spacc_hash_pool, ctx->digest_buf, ctx->digest_dma); + pdu_ddt_free(&ctx->dst); +} + +static void spacc_init_calg(struct crypto_alg *calg, + const struct mode_tab *mode) +{ + + strscpy(calg->cra_name, mode->name); + calg->cra_name[sizeof(mode->name) - 1] = '\0'; + + strscpy(calg->cra_driver_name, "spacc-"); + strcat(calg->cra_driver_name, mode->name); + calg->cra_driver_name[sizeof(calg->cra_driver_name) - 1] = '\0'; + + calg->cra_blocksize = mode->blocklen; +} + +static int spacc_ctx_clone_handle(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(tfm); + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + struct spacc_priv *priv = dev_get_drvdata(tctx->dev); + + if (tctx->handle < 0) + return -ENXIO; + + ctx->acb.new_handle = spacc_clone_handle(&priv->spacc, tctx->handle, + &ctx->acb); + + if (ctx->acb.new_handle < 0) { + spacc_hash_cleanup_dma(tctx->dev, req); + return -ENOMEM; + } + + ctx->acb.tctx = tctx; + ctx->acb.ctx = ctx; + ctx->acb.req = req; + ctx->acb.spacc = &priv->spacc; + + return 0; +} + +static int spacc_hash_init_dma(struct device *dev, struct ahash_request *req, + int final) +{ + int rc = -1; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(tfm); + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + + gfp_t mflags = GFP_ATOMIC; + + if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) + mflags = GFP_KERNEL; + + ctx->digest_buf = dma_pool_alloc(spacc_hash_pool, mflags, + &ctx->digest_dma); + + if (!ctx->digest_buf) + return -ENOMEM; + + rc = pdu_ddt_init(&ctx->dst, 1 | 0x80000000); + if (rc < 0) { + pr_err("ERR: PDU DDT init error\n"); + rc = -EIO; + goto err_free_digest; + } + + 
pdu_ddt_add(&ctx->dst, ctx->digest_dma, SPACC_MAX_DIGEST_SIZE); + + if (ctx->total_nents > 0 && ctx->single_shot) { + /* single shot */ + spacc_ctx_clone_handle(req); + + if (req->nbytes) { + rc = spacc_sg_to_ddt(dev, req->src, req->nbytes, + &ctx->src, DMA_TO_DEVICE); + } else { + memset(tctx->tmp_buffer, '\0', PPP_BUF_SIZE); + sg_set_buf(&(tctx->tmp_sgl[0]), tctx->tmp_buffer, + PPP_BUF_SIZE); + rc = spacc_sg_to_ddt(dev, &(tctx->tmp_sgl[0]), + tctx->tmp_sgl[0].length, + &ctx->src, DMA_TO_DEVICE); + + } + } else if (ctx->total_nents == 0 && req->nbytes == 0) { + spacc_ctx_clone_handle(req); + + /* zero length case */ + memset(tctx->tmp_buffer, '\0', PPP_BUF_SIZE); + sg_set_buf(&(tctx->tmp_sgl[0]), tctx->tmp_buffer, PPP_BUF_SIZE); + rc = spacc_sg_to_ddt(dev, &(tctx->tmp_sgl[0]), + tctx->tmp_sgl[0].length, + &ctx->src, DMA_TO_DEVICE); + + } + + if (rc < 0) + goto err_free_dst; + + ctx->src_nents = rc; + + return rc; + +err_free_dst: + pdu_ddt_free(&ctx->dst); +err_free_digest: + dma_pool_free(spacc_hash_pool, ctx->digest_buf, ctx->digest_dma); + + return rc; +} + +static void spacc_free_mems(struct spacc_crypto_reqctx *ctx, + struct spacc_crypto_ctx *tctx, + struct ahash_request *req) +{ + spacc_hash_cleanup_dma_dst(tctx, req); + spacc_hash_cleanup_dma_src(tctx, req); + + if (ctx->single_shot) { + kfree(tctx->tmp_sgl); + tctx->tmp_sgl = NULL; + + ctx->single_shot = 0; + if (ctx->total_nents) + ctx->total_nents = 0; + } +} + +static void spacc_digest_cb(void *spacc, void *tfm) +{ + struct ahash_cb_data *cb = tfm; + int err = -1; + int dig_sz; + + dig_sz = crypto_ahash_digestsize(crypto_ahash_reqtfm(cb->req)); + + if (cb->ctx->single_shot) + memcpy(cb->req->result, cb->ctx->digest_buf, dig_sz); + else + memcpy(cb->tctx->digest_ctx_buf, cb->ctx->digest_buf, dig_sz); + + err = cb->spacc->job[cb->new_handle].job_err; + + dma_pool_free(spacc_hash_pool, cb->ctx->digest_buf, + cb->ctx->digest_dma); + spacc_free_mems(cb->ctx, cb->tctx, cb->req); + spacc_close(cb->spacc, 
cb->new_handle); + + if (cb->req->base.complete) + ahash_request_complete(cb->req, err); +} + +static int do_shash(unsigned char *name, unsigned char *result, + const u8 *data1, unsigned int data1_len, + const u8 *data2, unsigned int data2_len, + const u8 *key, unsigned int key_len) +{ + int rc; + unsigned int size; + struct crypto_shash *hash; + struct sdesc *sdesc; + + hash = crypto_alloc_shash(name, 0, 0); + if (IS_ERR(hash)) { + rc = PTR_ERR(hash); + pr_err("ERR: Crypto %s allocation error %d\n", name, rc); + return rc; + } + + size = sizeof(struct shash_desc) + crypto_shash_descsize(hash); + sdesc = kmalloc(size, GFP_KERNEL); + if (!sdesc) { + rc = -ENOMEM; + goto do_shash_err; + } + sdesc->shash.tfm = hash; + + if (key_len > 0) { + rc = crypto_shash_setkey(hash, key, key_len); + if (rc) { + pr_err("ERR: Could not setkey %s shash\n", name); + goto do_shash_err; + } + } + + rc = crypto_shash_init(&sdesc->shash); + if (rc) { + pr_err("ERR: Could not init %s shash\n", name); + goto do_shash_err; + } + + rc = crypto_shash_update(&sdesc->shash, data1, data1_len); + if (rc) { + pr_err("ERR: Could not update1\n"); + goto do_shash_err; + } + + if (data2 && data2_len) { + rc = crypto_shash_update(&sdesc->shash, data2, data2_len); + if (rc) { + pr_err("ERR: Could not update2\n"); + goto do_shash_err; + } + } + + rc = crypto_shash_final(&sdesc->shash, result); + if (rc) + pr_err("ERR: Could not generate %s hash\n", name); + +do_shash_err: + crypto_free_shash(hash); + kfree(sdesc); + + return rc; +} + +static int spacc_hash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + int rc; + const struct spacc_alg *salg = spacc_tfm_ahash(&tfm->base); + struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(tfm); + struct spacc_priv *priv = dev_get_drvdata(tctx->dev); + unsigned int digest_size, block_size; + char hash_alg[CRYPTO_MAX_ALG_NAME]; + + block_size = crypto_tfm_alg_blocksize(&tfm->base); + digest_size = crypto_ahash_digestsize(tfm); + + /* + * We will 
not use the hardware in case of HMACs + * This was meant for hashes but it works for cmac/xcbc since we + * only intend to support 128-bit keys... + */ + if (keylen > block_size && salg->mode->id != CRYPTO_MODE_MAC_CMAC) { + pr_debug("Exceeds keylen: %u\n", keylen); + pr_debug("Req. keylen hashing %s\n", + salg->calg->cra_name); + + memset(hash_alg, 0x00, CRYPTO_MAX_ALG_NAME); + switch (salg->mode->id) { + case CRYPTO_MODE_HMAC_SHA224: + rc = do_shash("sha224", tctx->ipad, key, keylen, + NULL, 0, NULL, 0); + break; + + case CRYPTO_MODE_HMAC_SHA256: + rc = do_shash("sha256", tctx->ipad, key, keylen, + NULL, 0, NULL, 0); + break; + + case CRYPTO_MODE_HMAC_SHA384: + rc = do_shash("sha384", tctx->ipad, key, keylen, + NULL, 0, NULL, 0); + break; + + case CRYPTO_MODE_HMAC_SHA512: + rc = do_shash("sha512", tctx->ipad, key, keylen, + NULL, 0, NULL, 0); + break; + + case CRYPTO_MODE_HMAC_MD5: + rc = do_shash("md5", tctx->ipad, key, keylen, + NULL, 0, NULL, 0); + break; + + case CRYPTO_MODE_HMAC_SHA1: + rc = do_shash("sha1", tctx->ipad, key, keylen, + NULL, 0, NULL, 0); + break; + + default: + return -EINVAL; + } + + if (rc < 0) { + pr_err("ERR: %d computing shash for %s\n", + rc, hash_alg); + return -EIO; + } + + keylen = digest_size; + pr_debug("updated keylen: %u\n", keylen); + + tctx->ctx_valid = false; + + if (salg->mode->sw_fb) { + rc = crypto_ahash_setkey(tctx->fb.hash, + tctx->ipad, keylen); + if (rc < 0) + return rc; + } + } else { + memcpy(tctx->ipad, key, keylen); + tctx->ctx_valid = false; + + if (salg->mode->sw_fb) { + rc = crypto_ahash_setkey(tctx->fb.hash, key, keylen); + if (rc < 0) + return rc; + } + } + + /* close handle since key size may have changed */ + if (tctx->handle >= 0) { + spacc_close(&priv->spacc, tctx->handle); + put_device(tctx->dev); + tctx->handle = -1; + tctx->dev = NULL; + } + + priv = NULL; + priv = dev_get_drvdata(salg->dev[0]); + tctx->dev = get_device(salg->dev[0]); + if (spacc_isenabled(&priv->spacc, salg->mode->id, keylen)) { + 
tctx->handle = spacc_open(&priv->spacc, + CRYPTO_MODE_NULL, + salg->mode->id, -1, + 0, spacc_digest_cb, tfm); + + } else + pr_debug(" Keylen: %d not enabled for algo: %d", + keylen, salg->mode->id); + + if (tctx->handle < 0) { + pr_err("ERR: Failed to open SPAcc context\n"); + put_device(salg->dev[0]); + return -EIO; + } + + rc = spacc_set_operation(&priv->spacc, tctx->handle, OP_ENCRYPT, + ICV_HASH, IP_ICV_OFFSET, 0, 0, 0); + if (rc < 0) { + spacc_close(&priv->spacc, tctx->handle); + tctx->handle = -1; + put_device(tctx->dev); + return -EIO; + } + + if (salg->mode->id == CRYPTO_MODE_MAC_XCBC || + salg->mode->id == CRYPTO_MODE_MAC_SM4_XCBC) { + rc = spacc_compute_xcbc_key(&priv->spacc, salg->mode->id, + tctx->handle, tctx->ipad, + keylen, tctx->ipad); + if (rc < 0) { + dev_warn(tctx->dev, + "Failed to compute XCBC key: %d\n", rc); + return -EIO; + } + rc = spacc_write_context(&priv->spacc, tctx->handle, + SPACC_HASH_OPERATION, tctx->ipad, + 32 + keylen, NULL, 0); + } else { + rc = spacc_write_context(&priv->spacc, tctx->handle, + SPACC_HASH_OPERATION, tctx->ipad, + keylen, NULL, 0); + } + + memset(tctx->ipad, 0, sizeof(tctx->ipad)); + if (rc < 0) { + pr_err("ERR: Failed to write SPAcc context\n"); + /* Non-fatal; we continue with the software fallback. 
*/ + return 0; + } + + tctx->ctx_valid = true; + + return 0; +} + +static int spacc_set_statesize(struct spacc_alg *salg) +{ + unsigned int statesize = 0; + + switch (salg->mode->id) { + case CRYPTO_MODE_HMAC_SHA1: + case CRYPTO_MODE_HASH_SHA1: + statesize = sizeof(struct sha1_state); + break; + case CRYPTO_MODE_MAC_CMAC: + case CRYPTO_MODE_MAC_XCBC: + statesize = sizeof(struct crypto_aes_ctx); + break; + case CRYPTO_MODE_MAC_SM4_CMAC: + case CRYPTO_MODE_MAC_SM4_XCBC: + statesize = sizeof(struct sm4_ctx); + break; + case CRYPTO_MODE_HMAC_MD5: + case CRYPTO_MODE_HASH_MD5: + statesize = sizeof(struct md5_state); + break; + case CRYPTO_MODE_HASH_SHA224: + case CRYPTO_MODE_HASH_SHA256: + case CRYPTO_MODE_HMAC_SHA224: + case CRYPTO_MODE_HMAC_SHA256: + statesize = sizeof(struct sha256_state); + break; + case CRYPTO_MODE_HMAC_SHA512: + case CRYPTO_MODE_HASH_SHA512: + statesize = sizeof(struct sha512_state); + break; + case CRYPTO_MODE_HMAC_SHA384: + case CRYPTO_MODE_HASH_SHA384: + statesize = sizeof(struct spacc_crypto_reqctx); + break; + case CRYPTO_MODE_HASH_SHA3_224: + case CRYPTO_MODE_HASH_SHA3_256: + case CRYPTO_MODE_HASH_SHA3_384: + case CRYPTO_MODE_HASH_SHA3_512: + statesize = sizeof(struct sha3_state); + break; + case CRYPTO_MODE_HMAC_SM3: + case CRYPTO_MODE_MAC_MICHAEL: + statesize = sizeof(struct spacc_crypto_reqctx); + break; + default: + break; + } + + return statesize; +} + +static int spacc_hash_cra_init(struct crypto_tfm *tfm) +{ + const struct spacc_alg *salg = spacc_tfm_ahash(tfm); + struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm); + struct spacc_priv *priv = NULL; + + tctx->handle = -1; + tctx->ctx_valid = false; + tctx->dev = get_device(salg->dev[0]); + + if (salg->mode->sw_fb) { + tctx->fb.hash = crypto_alloc_ahash(salg->calg->cra_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(tctx->fb.hash)) { + if (tctx->handle >= 0) + spacc_close(&priv->spacc, tctx->handle); + put_device(tctx->dev); + return PTR_ERR(tctx->fb.hash); + } + + 
crypto_ahash_set_statesize(__crypto_ahash_cast(tfm), + crypto_ahash_statesize(tctx->fb.hash)); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct spacc_crypto_reqctx) + + crypto_ahash_reqsize(tctx->fb.hash)); + + } else { + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct spacc_crypto_reqctx)); + } + + return 0; +} + +static void spacc_hash_cra_exit(struct crypto_tfm *tfm) +{ + struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm); + struct spacc_priv *priv = dev_get_drvdata(tctx->dev); + + crypto_free_ahash(tctx->fb.hash); + + if (tctx->handle >= 0) + spacc_close(&priv->spacc, tctx->handle); + + put_device(tctx->dev); +} + +static int spacc_hash_init(struct ahash_request *req) +{ + int rc = 0; + struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + + ctx->digest_buf = NULL; + ctx->single_shot = 0; + ctx->total_nents = 0; + tctx->tmp_sgl = NULL; + + ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); + ctx->fb.hash_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rc = crypto_ahash_init(&ctx->fb.hash_req); + + return rc; +} + +static int spacc_hash_update(struct ahash_request *req) +{ + int rc; + int nbytes = req->nbytes; + + struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + + if (!nbytes) + return 0; + + pr_debug("%s Using SW fallback\n", __func__); + + + ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); + ctx->fb.hash_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + ctx->fb.hash_req.nbytes = req->nbytes; + ctx->fb.hash_req.src = req->src; + + rc = crypto_ahash_update(&ctx->fb.hash_req); + return rc; +} + +static int spacc_hash_final(struct ahash_request *req) +{ + struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); + struct 
spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + int rc; + + + ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); + ctx->fb.hash_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + ctx->fb.hash_req.result = req->result; + + rc = crypto_ahash_final(&ctx->fb.hash_req); + return rc; +} + +static int spacc_hash_digest(struct ahash_request *req) +{ + int ret, final = 0; + int rc; + struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + struct spacc_priv *priv = dev_get_drvdata(tctx->dev); + const struct spacc_alg *salg = spacc_tfm_ahash(&reqtfm->base); + + + /* direct single shot digest call */ + ctx->single_shot = 1; + ctx->total_nents = sg_nents(req->src); + + /* alloc tmp_sgl */ + tctx->tmp_sgl = kmalloc(sizeof(*(tctx->tmp_sgl)) * 2, GFP_KERNEL); + + if (!tctx->tmp_sgl) + return -ENOMEM; + + sg_init_table(tctx->tmp_sgl, 2); + tctx->tmp_sgl[0].length = 0; + + + if (tctx->handle < 0 || !tctx->ctx_valid) { + priv = NULL; + pr_debug("%s: open SPAcc context\n", __func__); + + priv = dev_get_drvdata(salg->dev[0]); + tctx->dev = get_device(salg->dev[0]); + ret = spacc_isenabled(&priv->spacc, salg->mode->id, 0); + if (ret) + tctx->handle = spacc_open(&priv->spacc, + CRYPTO_MODE_NULL, + salg->mode->id, -1, 0, + spacc_digest_cb, + reqtfm); + + if (tctx->handle < 0) { + put_device(salg->dev[0]); + pr_debug("Failed to open SPAcc context\n"); + goto fallback; + } + + rc = spacc_set_operation(&priv->spacc, tctx->handle, + OP_ENCRYPT, ICV_HASH, IP_ICV_OFFSET, + 0, 0, 0); + if (rc < 0) { + spacc_close(&priv->spacc, tctx->handle); + pr_debug("Failed to open SPAcc context\n"); + tctx->handle = -1; + put_device(tctx->dev); + goto fallback; + } + tctx->ctx_valid = true; + } + + rc = spacc_hash_init_dma(tctx->dev, req, final); + if (rc < 0) + goto fallback; + + if (rc == 0) + return 0; 
+ + rc = spacc_packet_enqueue_ddt(&priv->spacc, ctx->acb.new_handle, + &ctx->src, &ctx->dst, req->nbytes, + 0, req->nbytes, 0, 0, 0); + + if (rc < 0) { + spacc_hash_cleanup_dma(tctx->dev, req); + spacc_close(&priv->spacc, ctx->acb.new_handle); + + if (rc != -EBUSY) { + pr_debug("Failed to enqueue job, ERR: %d\n", rc); + return rc; + } + + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + + goto fallback; + } + + return -EINPROGRESS; + +fallback: + /* Start from scratch as init is not called before digest */ + ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); + ctx->fb.hash_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + ctx->fb.hash_req.nbytes = req->nbytes; + ctx->fb.hash_req.src = req->src; + ctx->fb.hash_req.result = req->result; + + return crypto_ahash_digest(&ctx->fb.hash_req); +} + +static int spacc_hash_finup(struct ahash_request *req) +{ + struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + int rc; + + ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); + ctx->fb.hash_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + ctx->fb.hash_req.nbytes = req->nbytes; + ctx->fb.hash_req.src = req->src; + ctx->fb.hash_req.result = req->result; + + rc = crypto_ahash_finup(&ctx->fb.hash_req); + return rc; +} + +static int spacc_hash_import(struct ahash_request *req, const void *in) +{ + int rc; + struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + + ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); + ctx->fb.hash_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rc = crypto_ahash_import(&ctx->fb.hash_req, in); + return rc; +} + +static int spacc_hash_export(struct ahash_request *req, void *out) +{ + int rc; + struct crypto_ahash 
*reqtfm = crypto_ahash_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); + + ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); + ctx->fb.hash_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rc = crypto_ahash_export(&ctx->fb.hash_req, out); + return rc; +} + +static const struct ahash_alg spacc_hash_template = { + .init = spacc_hash_init, + .update = spacc_hash_update, + .final = spacc_hash_final, + .finup = spacc_hash_finup, + .digest = spacc_hash_digest, + .setkey = spacc_hash_setkey, + .export = spacc_hash_export, + .import = spacc_hash_import, + + .halg.base = { + .cra_priority = 300, + .cra_module = THIS_MODULE, + .cra_init = spacc_hash_cra_init, + .cra_exit = spacc_hash_cra_exit, + .cra_ctxsize = sizeof(struct spacc_crypto_ctx), + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_OPTIONAL_KEY + }, +}; + +static int spacc_register_hash(struct spacc_alg *salg) +{ + int rc; + + salg->calg = &salg->alg.hash.halg.base; + salg->alg.hash = spacc_hash_template; + + spacc_init_calg(salg->calg, salg->mode); + salg->alg.hash.halg.digestsize = salg->mode->hashlen; + salg->alg.hash.halg.statesize = spacc_set_statesize(salg); + + rc = crypto_register_ahash(&salg->alg.hash); + if (rc < 0) + return rc; + + mutex_lock(&spacc_hash_alg_mutex); + list_add(&salg->list, &spacc_hash_alg_list); + mutex_unlock(&spacc_hash_alg_mutex); + + return 0; +} + + +int probe_hashes(struct platform_device *spacc_pdev) +{ + int rc; + unsigned int i; + int registered = 0; + struct spacc_alg *salg; + struct spacc_priv *priv = dev_get_drvdata(&spacc_pdev->dev); + + spacc_hash_pool = dma_pool_create("spacc-digest", &spacc_pdev->dev, + SPACC_MAX_DIGEST_SIZE, + SPACC_DMA_ALIGN, SPACC_DMA_BOUNDARY); + + if (!spacc_hash_pool) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(possible_hashes); i++) + possible_hashes[i].valid = 0; + + for (i = 0; i < 
ARRAY_SIZE(possible_hashes); i++) { + if (possible_hashes[i].valid == 0 && + spacc_isenabled(&priv->spacc, + possible_hashes[i].id & 0xFF, + possible_hashes[i].hashlen)) { + + salg = kmalloc(sizeof(*salg), GFP_KERNEL); + if (!salg) + return -ENOMEM; + + salg->mode = &possible_hashes[i]; + + /* Copy all dev's over to the salg */ + salg->dev[0] = &spacc_pdev->dev; + salg->dev[1] = NULL; + + rc = spacc_register_hash(salg); + if (rc < 0) { + kfree(salg); + continue; + } + pr_debug("registered %s\n", + possible_hashes[i].name); + + registered++; + possible_hashes[i].valid = 1; + } + } + + return registered; +} + +int spacc_unregister_hash_algs(void) +{ + struct spacc_alg *salg, *tmp; + + mutex_lock(&spacc_hash_alg_mutex); + list_for_each_entry_safe(salg, tmp, &spacc_hash_alg_list, list) { + crypto_unregister_alg(salg->calg); + list_del(&salg->list); + kfree(salg); + } + mutex_unlock(&spacc_hash_alg_mutex); + + dma_pool_destroy(spacc_hash_pool); + + return 0; +} From 06af76b46c78f4729fe2f9712a74502c90d87554 Mon Sep 17 00:00:00 2001 From: Pavitrakumar M Date: Mon, 29 Jul 2024 09:43:48 +0530 Subject: [PATCH 14/96] crypto: spacc - Add SPAcc aead support Signed-off-by: shwetar Signed-off-by: Pavitrakumar M Acked-by: Ruud Derwig Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_aead.c | 1260 +++++++++++++++++++++++++ 1 file changed, 1260 insertions(+) create mode 100755 drivers/crypto/dwc-spacc/spacc_aead.c diff --git a/drivers/crypto/dwc-spacc/spacc_aead.c b/drivers/crypto/dwc-spacc/spacc_aead.c new file mode 100755 index 000000000000..3468ff605957 --- /dev/null +++ b/drivers/crypto/dwc-spacc/spacc_aead.c @@ -0,0 +1,1260 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spacc_device.h" +#include "spacc_core.h" + +static LIST_HEAD(spacc_aead_alg_list); +static DEFINE_MUTEX(spacc_aead_alg_mutex); + +#define SPACC_B0_SIZE 16 +#define SET_IV_IN_SRCBUF 0x80000000 +#define 
SET_IV_IN_CONTEXT 0x0 +#define AAD_BUF_SIZE 4096 +#define ADATA_BUF_SIZE (AAD_BUF_SIZE + SPACC_B0_SIZE +\ + SPACC_MAX_IV_SIZE) + +struct spacc_iv_buf { + unsigned char iv[SPACC_MAX_IV_SIZE]; + unsigned char spacc_adata[ADATA_BUF_SIZE]; + struct scatterlist sg[2], spacc_adata_sg[2]; + struct scatterlist *spacc_ptextsg, temp_aad[2]; +}; + +static struct kmem_cache *spacc_iv_pool; + +static struct mode_tab possible_aeads[] = { + { MODE_TAB_AEAD("rfc7539(chacha20,poly1305)", + CRYPTO_MODE_CHACHA20_POLY1305, CRYPTO_MODE_NULL, + 16, 12, 1), .keylen = { 16, 24, 32 } + }, + { MODE_TAB_AEAD("gcm(aes)", + CRYPTO_MODE_AES_GCM, CRYPTO_MODE_NULL, + 16, 12, 1), .keylen = { 16, 24, 32 } + }, + { MODE_TAB_AEAD("gcm(sm4)", + CRYPTO_MODE_SM4_GCM, CRYPTO_MODE_NULL, + 16, 12, 1), .keylen = { 16 } + }, + { MODE_TAB_AEAD("ccm(aes)", + CRYPTO_MODE_AES_CCM, CRYPTO_MODE_NULL, + 16, 16, 1), .keylen = { 16, 24, 32 } + }, + { MODE_TAB_AEAD("ccm(sm4)", + CRYPTO_MODE_SM4_CCM, CRYPTO_MODE_NULL, + 16, 16, 1), .keylen = { 16, 24, 32 } + }, +}; + +static void spacc_init_aead_alg(struct crypto_alg *calg, + const struct mode_tab *mode) +{ + strscpy(calg->cra_name, mode->name, sizeof(mode->name) - 1); + calg->cra_name[sizeof(mode->name) - 1] = '\0'; + + strscpy(calg->cra_driver_name, "spacc-"); + strcat(calg->cra_driver_name, mode->name); + calg->cra_driver_name[sizeof(calg->cra_driver_name) - 1] = '\0'; + + calg->cra_blocksize = mode->blocklen; +} + +static int ccm_16byte_aligned_len(int in_len) +{ + int len; + int computed_mod; + + if (in_len > 0) { + computed_mod = in_len % 16; + if (computed_mod) + len = in_len - computed_mod + 16; + else + len = in_len; + } else { + len = in_len; + } + + return len; +} + +/* taken from crypto/ccm.c */ +static int spacc_aead_format_adata(u8 *adata, unsigned int a) +{ + int len = 0; + + /* add control info for associated data + * RFC 3610 and NIST Special Publication 800-38C + */ + if (a < 65280) { + *(__be16 *)adata = cpu_to_be16(a); + len = 2; + } else { + 
*(__be16 *)adata = cpu_to_be16(0xfffe); + *(__be32 *)&adata[2] = cpu_to_be32(a); + len = 6; + } + + return len; +} + + +/* taken from crypto/ccm.c */ +static int spacc_aead_set_msg_len(u8 *block, unsigned int msglen, int csize) +{ + __be32 data; + + memset(block, 0, csize); + block += csize; + + if (csize >= 4) + csize = 4; + else if (msglen > (unsigned int)(1 << (8 * csize))) + return -EOVERFLOW; + + data = cpu_to_be32(msglen); + memcpy(block - csize, (u8 *)&data + 4 - csize, csize); + + return 0; +} + +static int spacc_aead_init_dma(struct device *dev, struct aead_request *req, + u64 seq, uint32_t icvlen, int encrypt, int *alen) +{ + struct crypto_aead *reqtfm = crypto_aead_reqtfm(req); + struct spacc_crypto_ctx *tctx = crypto_aead_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = aead_request_ctx(req); + + gfp_t mflags = GFP_ATOMIC; + struct spacc_iv_buf *iv; + int ccm_aad_16b_len = 0; + int rc, B0len; + int payload_len, spacc_adata_sg_buf_len; + unsigned int ivsize = crypto_aead_ivsize(reqtfm); + + /* always have 1 byte of IV */ + if (!ivsize) + ivsize = 1; + + if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) + mflags = GFP_KERNEL; + + ctx->iv_buf = kmem_cache_alloc(spacc_iv_pool, mflags); + if (!ctx->iv_buf) + return -ENOMEM; + iv = ctx->iv_buf; + + sg_init_table(iv->sg, ARRAY_SIZE(iv->sg)); + sg_init_table(iv->spacc_adata_sg, ARRAY_SIZE(iv->spacc_adata_sg)); + + B0len = 0; + ctx->aead_nents = 0; + + memset(iv->iv, 0, SPACC_MAX_IV_SIZE); + memset(iv->spacc_adata, 0, ADATA_BUF_SIZE); + + /* copy the IV out for AAD */ + memcpy(iv->iv, req->iv, ivsize); + memset(iv->spacc_adata, 0, 144); + + /* now we need to figure out the cipher IV which may or + * may not be "req->iv" depending on the mode we are in + */ + if (tctx->mode & SPACC_MANGLE_IV_FLAG) { + switch (tctx->mode & 0x7F00) { + case SPACC_MANGLE_IV_RFC3686: + case SPACC_MANGLE_IV_RFC4106: + case SPACC_MANGLE_IV_RFC4543: + { + unsigned char *p = iv->spacc_adata; + /* we're in RFC3686 mode so the last + * 4 
bytes of the key are the SALT + */ + memcpy(p, tctx->csalt, 4); + memcpy(p + 4, req->iv, ivsize); + + p[12] = 0; + p[13] = 0; + p[14] = 0; + p[15] = 1; + } + break; + case SPACC_MANGLE_IV_RFC4309: + { + unsigned char *p = iv->spacc_adata; + int L, M; + u32 lm = req->cryptlen; + + /* CCM mode */ + /* p[0..15] is the CTR IV */ + /* p[16..31] is the CBC-MAC B0 block*/ + B0len = SPACC_B0_SIZE; + /* IPsec requires L=4*/ + L = 4; + M = tctx->auth_size; + + /* CTR block */ + p[0] = L - 1; + memcpy(p + 1, tctx->csalt, 3); + memcpy(p + 4, req->iv, ivsize); + p[12] = 0; + p[13] = 0; + p[14] = 0; + p[15] = 1; + + /* store B0 block at p[16..31] */ + p[16] = (1 << 6) | (((M - 2) >> 1) << 3) + | (L - 1); + memcpy(p + 1 + 16, tctx->csalt, 3); + memcpy(p + 4 + 16, req->iv, ivsize); + + /* now store length */ + p[16 + 12 + 0] = (lm >> 24) & 0xFF; + p[16 + 12 + 1] = (lm >> 16) & 0xFF; + p[16 + 12 + 2] = (lm >> 8) & 0xFF; + p[16 + 12 + 3] = (lm) & 0xFF; + + /*now store the pre-formatted AAD */ + p[32] = (req->assoclen >> 8) & 0xFF; + p[33] = (req->assoclen) & 0xFF; + /* we added 2 byte header to the AAD */ + B0len += 2; + } + break; + } + } else if (tctx->mode == CRYPTO_MODE_AES_CCM || + tctx->mode == CRYPTO_MODE_SM4_CCM) { + unsigned char *p = iv->spacc_adata; + u8 *orig_iv = req->iv; + int L, M; + + u32 lm = (encrypt) ? + req->cryptlen : + req->cryptlen - tctx->auth_size; + + memset(iv->spacc_adata, 0, 144); + iv->spacc_ptextsg = req->src; + /* CCM mode */ + /* p[0..15] is the CTR IV */ + /* p[16..31] is the CBC-MAC B0 block*/ + B0len = SPACC_B0_SIZE; + + /* IPsec requires L=4 */ + L = req->iv[0] + 1; + M = tctx->auth_size; + + /* Note: rfc 3610 and NIST 800-38C require counter of + * zero to encrypt auth tag. + */ + memset(orig_iv + 15 - orig_iv[0], 0, orig_iv[0] + 1); + + /* CTR block */ + memcpy(p, req->iv, ivsize); + memcpy(p + 16, req->iv, ivsize); + + /* Taken from ccm.c + * Note: rfc 3610 and NIST 800-38C require counter of + * zero to encrypt auth tag. 
+ */ + + /* Store B0 block at p[16..31] */ + p[16] |= (8 * ((M - 2) / 2)); + + /* set adata if assoclen > 0 */ + if (req->assoclen) + p[16] |= 64; + + /* now store length, this is L size starts from 16-L + * to 16 of B0 + */ + spacc_aead_set_msg_len(p + 16 + 16 - L, lm, L); + + if (req->assoclen) { + + /* store pre-formatted AAD: + * AAD_LEN + AAD + PAD + */ + *alen = spacc_aead_format_adata(&p[32], req->assoclen); + + ccm_aad_16b_len = + ccm_16byte_aligned_len(req->assoclen + *alen); + + /* Adding the rest of AAD from req->src */ + scatterwalk_map_and_copy(p + 32 + *alen, + req->src, 0, + req->assoclen, 0); + + /* Copy AAD to req->dst */ + scatterwalk_map_and_copy(p + 32 + *alen, req->dst, + 0, req->assoclen, 1); + + iv->spacc_ptextsg = scatterwalk_ffwd(iv->temp_aad, + req->src, req->assoclen); + } + /* default is to copy the iv over since the + * cipher and protocol IV are the same + */ + memcpy(iv->spacc_adata, req->iv, ivsize); + } + + /* this is part of the AAD */ + sg_set_buf(iv->sg, iv->iv, ivsize); + + /* GCM and CCM don't include the IV in the AAD */ + switch (tctx->mode) { + case CRYPTO_MODE_AES_GCM_RFC4106: + case CRYPTO_MODE_AES_GCM: + case CRYPTO_MODE_SM4_GCM_RFC8998: + case CRYPTO_MODE_CHACHA20_POLY1305: + case CRYPTO_MODE_NULL: + + payload_len = req->cryptlen + icvlen + req->assoclen; + spacc_adata_sg_buf_len = SPACC_MAX_IV_SIZE + B0len; + + /* this is the actual IV getting fed to the core + * (via IV IMPORT) + */ + + sg_set_buf(iv->spacc_adata_sg, iv->spacc_adata, + spacc_adata_sg_buf_len); + + sg_chain(iv->spacc_adata_sg, + sg_nents_for_len(iv->spacc_adata_sg, + spacc_adata_sg_buf_len) + 1, req->src); + + rc = spacc_sg_to_ddt(dev, iv->spacc_adata_sg, + spacc_adata_sg_buf_len + payload_len, + &ctx->src, DMA_TO_DEVICE); + + if (rc < 0) + goto err_free_iv; + ctx->aead_nents = rc; + break; + case CRYPTO_MODE_AES_CCM: + case CRYPTO_MODE_AES_CCM_RFC4309: + case CRYPTO_MODE_SM4_CCM: + + + if (encrypt) + payload_len = + ccm_16byte_aligned_len(req->cryptlen 
+ icvlen); + else + payload_len = + ccm_16byte_aligned_len(req->cryptlen); + + spacc_adata_sg_buf_len = SPACC_MAX_IV_SIZE + B0len + + ccm_aad_16b_len; + + /* this is the actual IV getting fed to the core (via IV IMPORT) + * This has CTR IV + B0 + AAD(B1, B2, ...) + */ + sg_set_buf(iv->spacc_adata_sg, iv->spacc_adata, + spacc_adata_sg_buf_len); + sg_chain(iv->spacc_adata_sg, + sg_nents_for_len(iv->spacc_adata_sg, + spacc_adata_sg_buf_len) + 1, + iv->spacc_ptextsg); + + rc = spacc_sg_to_ddt(dev, iv->spacc_adata_sg, + spacc_adata_sg_buf_len + payload_len, + &ctx->src, DMA_TO_DEVICE); + if (rc < 0) + goto err_free_iv; + ctx->aead_nents = rc; + break; + default: + + /* this is the actual IV getting fed to the core (via IV IMPORT) + * This has CTR IV + B0 + AAD(B1, B2, ...) + */ + payload_len = req->cryptlen + icvlen + req->assoclen; + spacc_adata_sg_buf_len = SPACC_MAX_IV_SIZE + B0len; + sg_set_buf(iv->spacc_adata_sg, iv->spacc_adata, + spacc_adata_sg_buf_len); + + sg_chain(iv->spacc_adata_sg, + sg_nents_for_len(iv->spacc_adata_sg, + spacc_adata_sg_buf_len) + 1, + req->src); + + rc = spacc_sg_to_ddt(dev, iv->spacc_adata_sg, + spacc_adata_sg_buf_len + payload_len, + &ctx->src, DMA_TO_DEVICE); + + if (rc < 0) + goto err_free_iv; + ctx->aead_nents = rc; + } + + /* Putting in req->dst is good since it won't overwrite anything + * even in case of CCM this is fine condition + */ + if (req->dst != req->src) { + switch (tctx->mode) { + case CRYPTO_MODE_AES_CCM: + case CRYPTO_MODE_AES_CCM_RFC4309: + case CRYPTO_MODE_SM4_CCM: + /* If req->dst buffer len is not-positive, + * then skip setting up of DMA + */ + if (req->dst->length <= 0) { + ctx->dst_nents = 0; + return 0; + } + + if (encrypt) + payload_len = req->cryptlen + icvlen + + req->assoclen; + else + payload_len = req->cryptlen - tctx->auth_size + + req->assoclen; + + /* For corner cases where PTlen=AADlen=0, we set default + * to 16 + */ + rc = spacc_sg_to_ddt(dev, req->dst, + payload_len > 0 ? 
payload_len : 16, + &ctx->dst, DMA_FROM_DEVICE); + if (rc < 0) + goto err_free_src; + ctx->dst_nents = rc; + break; + default: + + /* If req->dst buffer len is not-positive, + * then skip setting up of DMA + */ + if (req->dst->length <= 0) { + ctx->dst_nents = 0; + return 0; + } + + if (encrypt) + payload_len = SPACC_MAX_IV_SIZE + req->cryptlen + + icvlen + req->assoclen; + else { + payload_len = req->cryptlen - tctx->auth_size + + req->assoclen; + if (payload_len <= 0) + return -EBADMSG; + } + + + rc = spacc_sg_to_ddt(dev, req->dst, + payload_len > 0 ? payload_len : 16, + &ctx->dst, DMA_FROM_DEVICE); + if (rc < 0) + goto err_free_src; + ctx->dst_nents = rc; + } + } + + return 0; + +err_free_src: + if (ctx->aead_nents) { + dma_unmap_sg(dev, iv->spacc_adata_sg, ctx->aead_nents, + DMA_TO_DEVICE); + + pdu_ddt_free(&ctx->src); + } + +err_free_iv: + kmem_cache_free(spacc_iv_pool, ctx->iv_buf); + + return rc; +} + +static void spacc_aead_cleanup_dma(struct device *dev, struct aead_request *req) +{ + struct spacc_crypto_reqctx *ctx = aead_request_ctx(req); + struct spacc_iv_buf *iv = ctx->iv_buf; + + if (req->src != req->dst && ctx->dst_nents > 0) { + dma_unmap_sg(dev, req->dst, ctx->dst_nents, + DMA_FROM_DEVICE); + pdu_ddt_free(&ctx->dst); + } + + if (ctx->aead_nents) { + dma_unmap_sg(dev, iv->spacc_adata_sg, ctx->aead_nents, + DMA_TO_DEVICE); + + pdu_ddt_free(&ctx->src); + } + + kmem_cache_free(spacc_iv_pool, ctx->iv_buf); +} + +static bool spacc_check_keylen(const struct spacc_alg *salg, + unsigned int keylen) +{ + unsigned int i, mask = salg->keylen_mask; + + if (mask > (1ul << ARRAY_SIZE(salg->mode->keylen)) - 1) + return false; + + for (i = 0; mask; i++, mask >>= 1) { + if (mask & 1 && salg->mode->keylen[i] == keylen) + return true; + } + + return false; +} + +static void spacc_aead_cb(void *spacc, void *tfm) +{ + struct aead_cb_data *cb = tfm; + int err = -1; + u32 status_reg = readl(cb->spacc->regmap + SPACC_REG_STATUS); + u32 status_ret = (status_reg >> 24) & 
0x3; + + dma_sync_sg_for_cpu(cb->tctx->dev, cb->req->dst, + cb->ctx->dst_nents, DMA_FROM_DEVICE); + + /* ICV mismatch send bad msg */ + if (status_ret == 0x1) { + err = -EBADMSG; + goto REQ_DST_CP_SKIP; + } + err = cb->spacc->job[cb->new_handle].job_err; + +REQ_DST_CP_SKIP: + spacc_aead_cleanup_dma(cb->tctx->dev, cb->req); + spacc_close(cb->spacc, cb->new_handle); + + /* call complete */ + aead_request_complete(cb->req, err); +} + +static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct spacc_crypto_ctx *ctx = crypto_aead_ctx(tfm); + const struct spacc_alg *salg = spacc_tfm_aead(&tfm->base); + struct spacc_priv *priv; + struct rtattr *rta = (void *)key; + struct crypto_authenc_key_param *param; + unsigned int authkeylen, enckeylen; + const unsigned char *authkey, *enckey; + unsigned char xcbc[64]; + + int err = -EINVAL; + int singlekey = 0; + + /* are keylens valid? */ + ctx->ctx_valid = false; + + switch (ctx->mode & 0xFF) { + case CRYPTO_MODE_SM4_GCM: + case CRYPTO_MODE_SM4_CCM: + case CRYPTO_MODE_NULL: + case CRYPTO_MODE_AES_GCM: + case CRYPTO_MODE_AES_CCM: + case CRYPTO_MODE_CHACHA20_POLY1305: + authkey = key; + authkeylen = 0; + enckey = key; + enckeylen = keylen; + ctx->keylen = keylen; + singlekey = 1; + goto skipover; + } + + if (!RTA_OK(rta, keylen) || + rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM || + RTA_PAYLOAD(rta) < sizeof(*param)) + return -EINVAL; + + param = RTA_DATA(rta); + enckeylen = be32_to_cpu(param->enckeylen); + key += RTA_ALIGN(rta->rta_len); + keylen -= RTA_ALIGN(rta->rta_len); + + if (keylen < enckeylen) + return -EINVAL; + + authkeylen = keylen - enckeylen; + + /* enckey is at &key[authkeylen] and + * authkey is at &key[0] + */ + authkey = &key[0]; + enckey = &key[authkeylen]; + +skipover: + /* detect RFC3686/4106 and trim from enckeylen(and copy salt..) 
*/ + if (ctx->mode & SPACC_MANGLE_IV_FLAG) { + switch (ctx->mode & 0x7F00) { + case SPACC_MANGLE_IV_RFC3686: + case SPACC_MANGLE_IV_RFC4106: + case SPACC_MANGLE_IV_RFC4543: + memcpy(ctx->csalt, enckey + enckeylen - 4, 4); + enckeylen -= 4; + break; + case SPACC_MANGLE_IV_RFC4309: + memcpy(ctx->csalt, enckey + enckeylen - 3, 3); + enckeylen -= 3; + break; + } + } + + if (!singlekey) { + if (authkeylen > salg->mode->hashlen) { + dev_warn(ctx->dev, "Auth key size of %u is not valid\n", + authkeylen); + return -EINVAL; + } + } + + if (!spacc_check_keylen(salg, enckeylen)) { + dev_warn(ctx->dev, "Enc key size of %u is not valid\n", + enckeylen); + return -EINVAL; + } + + /* if we're already open close the handle since + * the size may have changed + */ + if (ctx->handle != -1) { + priv = dev_get_drvdata(ctx->dev); + spacc_close(&priv->spacc, ctx->handle); + put_device(ctx->dev); + ctx->handle = -1; + } + + /* Open a handle and + * search all devices for an open handle + */ + priv = NULL; + priv = dev_get_drvdata(salg->dev[0]); + + /* increase reference */ + ctx->dev = get_device(salg->dev[0]); + + /* check if its a valid mode ... 
*/ + if (spacc_isenabled(&priv->spacc, salg->mode->aead.ciph & 0xFF, + enckeylen) && + spacc_isenabled(&priv->spacc, + salg->mode->aead.hash & 0xFF, authkeylen)) { + /* try to open spacc handle */ + ctx->handle = spacc_open(&priv->spacc, + salg->mode->aead.ciph & 0xFF, + salg->mode->aead.hash & 0xFF, + -1, 0, spacc_aead_cb, tfm); + } + + if (ctx->handle < 0) { + put_device(salg->dev[0]); + pr_debug("Failed to open SPAcc context\n"); + return -EIO; + } + + /* setup XCBC key */ + if (salg->mode->aead.hash == CRYPTO_MODE_MAC_XCBC) { + err = spacc_compute_xcbc_key(&priv->spacc, + salg->mode->aead.hash, + ctx->handle, authkey, + authkeylen, xcbc); + if (err < 0) { + dev_warn(ctx->dev, "Failed to compute XCBC key: %d\n", + err); + return -EIO; + } + authkey = xcbc; + authkeylen = 48; + } + + /* handle zero key/zero len DEC condition for SM4/AES GCM mode */ + ctx->zero_key = 0; + if (!key[0]) { + int i, val = 0; + + for (i = 0; i < keylen ; i++) + val += key[i]; + + if (val == 0) + ctx->zero_key = 1; + } + + err = spacc_write_context(&priv->spacc, ctx->handle, + SPACC_CRYPTO_OPERATION, enckey, + enckeylen, NULL, 0); + + if (err) { + dev_warn(ctx->dev, + "Could not write ciphering context: %d\n", err); + return -EIO; + } + + if (!singlekey) { + err = spacc_write_context(&priv->spacc, ctx->handle, + SPACC_HASH_OPERATION, authkey, + authkeylen, NULL, 0); + if (err) { + dev_warn(ctx->dev, + "Could not write hashing context: %d\n", err); + return -EIO; + } + } + + /* set expand key */ + spacc_set_key_exp(&priv->spacc, ctx->handle); + ctx->ctx_valid = true; + + memset(xcbc, 0, sizeof(xcbc)); + + /* copy key to ctx for fallback */ + memcpy(ctx->key, key, keylen); + + return 0; +} + +static int spacc_aead_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + struct spacc_crypto_ctx *ctx = crypto_aead_ctx(tfm); + + ctx->auth_size = authsize; + + /* taken from crypto/ccm.c */ + switch (ctx->mode) { + case CRYPTO_MODE_SM4_GCM: + case CRYPTO_MODE_AES_GCM: + switch 
(authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + break; + + case CRYPTO_MODE_AES_CCM: + case CRYPTO_MODE_SM4_CCM: + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + break; + + case CRYPTO_MODE_CHACHA20_POLY1305: + switch (authsize) { + case 16: + break; + default: + return -EINVAL; + } + break; + } + + return 0; +} + +static int spacc_aead_fallback(struct aead_request *req, + struct spacc_crypto_ctx *ctx, int encrypt) +{ + int ret; + struct aead_request *subreq = aead_request_ctx(req); + struct crypto_aead *reqtfm = crypto_aead_reqtfm(req); + struct aead_alg *alg = crypto_aead_alg(reqtfm); + const char *aead_name = alg->base.cra_name; + + ctx->fb.aead = crypto_alloc_aead(aead_name, 0, + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC); + if (!ctx->fb.aead) { + pr_err("Spacc aead fallback tfm is NULL!\n"); + return -EINVAL; + } + + subreq = aead_request_alloc(ctx->fb.aead, GFP_KERNEL); + if (!subreq) + return -ENOMEM; + + crypto_aead_setkey(ctx->fb.aead, ctx->key, ctx->keylen); + crypto_aead_setauthsize(ctx->fb.aead, ctx->auth_size); + + aead_request_set_tfm(subreq, ctx->fb.aead); + aead_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + req->iv); + aead_request_set_ad(subreq, req->assoclen); + + if (encrypt) + ret = crypto_aead_encrypt(subreq); + else + ret = crypto_aead_decrypt(subreq); + + aead_request_free(subreq); + crypto_free_aead(ctx->fb.aead); + ctx->fb.aead = NULL; + + return ret; +} + +static int spacc_aead_process(struct aead_request *req, u64 seq, int encrypt) +{ + int rc; + int B0len; + int alen; + u32 dstoff; + int icvremove; + int ivaadsize; + int ptaadsize; + int iv_to_context; + int spacc_proc_len; + u32 spacc_icv_offset = 0; + int spacc_pre_aad_size; + int ccm_aad_16b_len; + 
struct crypto_aead *reqtfm = crypto_aead_reqtfm(req); + int ivsize = crypto_aead_ivsize(reqtfm); + struct spacc_crypto_ctx *tctx = crypto_aead_ctx(reqtfm); + struct spacc_crypto_reqctx *ctx = aead_request_ctx(req); + struct spacc_priv *priv = dev_get_drvdata(tctx->dev); + + ctx->encrypt_op = encrypt; + alen = 0; + ccm_aad_16b_len = 0; + + if (tctx->handle < 0 || !tctx->ctx_valid || (req->cryptlen + + req->assoclen) > priv->max_msg_len) + return -EINVAL; + + /* IV is programmed to context by default */ + iv_to_context = SET_IV_IN_CONTEXT; + + if (encrypt) { + switch (tctx->mode & 0xFF) { + case CRYPTO_MODE_AES_GCM: + case CRYPTO_MODE_SM4_GCM: + case CRYPTO_MODE_CHACHA20_POLY1305: + /* For cryptlen = 0 */ + if (req->cryptlen + req->assoclen == 0) + return spacc_aead_fallback(req, tctx, encrypt); + break; + case CRYPTO_MODE_AES_CCM: + case CRYPTO_MODE_SM4_CCM: + + if (req->cryptlen + req->assoclen == 0) + return spacc_aead_fallback(req, tctx, encrypt); + + /* verify that msglen can in fact be represented + * in L bytes + */ + /* 2 <= L <= 8, so 1 <= L' <= 7. */ + if (req->iv[0] < 1 || req->iv[0] > 7) + return -EINVAL; + + break; + default: + pr_debug("Unsupported algo"); + return -EINVAL; + } + } else { + /* Handle the decryption */ + switch (tctx->mode & 0xFF) { + case CRYPTO_MODE_AES_GCM: + case CRYPTO_MODE_SM4_GCM: + case CRYPTO_MODE_CHACHA20_POLY1305: + /* For assoclen = 0 */ + if (req->assoclen == 0 && + (req->cryptlen - tctx->auth_size == 0)) + return spacc_aead_fallback(req, tctx, encrypt); + break; + case CRYPTO_MODE_AES_CCM: + case CRYPTO_MODE_SM4_CCM: + + if (req->assoclen == 0 && + (req->cryptlen - tctx->auth_size == 0)) + return spacc_aead_fallback(req, tctx, encrypt); + /* 2 <= L <= 8, so 1 <= L' <= 7. */ + if (req->iv[0] < 1 || req->iv[0] > 7) + return -EINVAL; + break; + default: + pr_debug("Unsupported algo"); + return -EINVAL; + } + } + + icvremove = (encrypt) ? 0 : tctx->auth_size; + + rc = spacc_aead_init_dma(tctx->dev, req, seq, (encrypt) ? 
+ tctx->auth_size : 0, encrypt, &alen); + if (rc < 0) + return -EINVAL; + + if (req->assoclen) + ccm_aad_16b_len = ccm_16byte_aligned_len(req->assoclen + alen); + + /* Note: This won't work if IV_IMPORT has been disabled */ + ctx->cb.new_handle = spacc_clone_handle(&priv->spacc, tctx->handle, + &ctx->cb); + if (ctx->cb.new_handle < 0) { + spacc_aead_cleanup_dma(tctx->dev, req); + return -EINVAL; + } + + ctx->cb.tctx = tctx; + ctx->cb.ctx = ctx; + ctx->cb.req = req; + ctx->cb.spacc = &priv->spacc; + + /* Write IV to the spacc-context + * IV can be written to context or as part of the input src buffer + * IV in case of CCM is going in the input src buff. + * IV for GCM is written to the context. + */ + if (tctx->mode == CRYPTO_MODE_AES_GCM_RFC4106 || + tctx->mode == CRYPTO_MODE_AES_GCM || + tctx->mode == CRYPTO_MODE_SM4_GCM_RFC8998 || + tctx->mode == CRYPTO_MODE_CHACHA20_POLY1305 || + tctx->mode == CRYPTO_MODE_NULL) { + iv_to_context = SET_IV_IN_CONTEXT; + rc = spacc_write_context(&priv->spacc, ctx->cb.new_handle, + SPACC_CRYPTO_OPERATION, NULL, 0, + req->iv, ivsize); + + if (rc < 0) { + spacc_aead_cleanup_dma(tctx->dev, req); + spacc_close(&priv->spacc, ctx->cb.new_handle); + return -EINVAL; + } + } + + /* CCM and GCM don't include the IV in the AAD */ + if (tctx->mode == CRYPTO_MODE_AES_GCM_RFC4106 || + tctx->mode == CRYPTO_MODE_AES_CCM_RFC4309 || + tctx->mode == CRYPTO_MODE_AES_GCM || + tctx->mode == CRYPTO_MODE_AES_CCM || + tctx->mode == CRYPTO_MODE_SM4_CCM || + tctx->mode == CRYPTO_MODE_SM4_GCM_RFC8998 || + tctx->mode == CRYPTO_MODE_CHACHA20_POLY1305 || + tctx->mode == CRYPTO_MODE_NULL) { + ivaadsize = 0; + } else { + ivaadsize = ivsize; + } + + /* CCM requires an extra block of AAD */ + if (tctx->mode == CRYPTO_MODE_AES_CCM_RFC4309 || + tctx->mode == CRYPTO_MODE_AES_CCM || + tctx->mode == CRYPTO_MODE_SM4_CCM) + B0len = SPACC_B0_SIZE; + else + B0len = 0; + + /* GMAC mode uses AAD for the entire message. 
+ * So does NULL cipher + */ + if (tctx->mode == CRYPTO_MODE_AES_GCM_RFC4543 || + tctx->mode == CRYPTO_MODE_NULL) { + if (req->cryptlen >= icvremove) + ptaadsize = req->cryptlen - icvremove; + } else { + ptaadsize = 0; + } + + /* Calculate and set the below, important parameters + * spacc icv offset - spacc_icv_offset + * destination offset - dstoff + * IV to context - This is set for CCM, not set for GCM + */ + if (req->dst == req->src) { + dstoff = ((uint32_t)(SPACC_MAX_IV_SIZE + B0len + + req->assoclen + ivaadsize)); + + /* CCM case */ + if (tctx->mode == CRYPTO_MODE_AES_CCM_RFC4309 || + tctx->mode == CRYPTO_MODE_AES_CCM || + tctx->mode == CRYPTO_MODE_SM4_CCM) { + iv_to_context = SET_IV_IN_SRCBUF; + dstoff = ((uint32_t)(SPACC_MAX_IV_SIZE + B0len + + ccm_aad_16b_len + ivaadsize)); + } + + } else { + dstoff = ((uint32_t)(req->assoclen + ivaadsize)); + + /* CCM case */ + if (tctx->mode == CRYPTO_MODE_AES_CCM_RFC4309 || + tctx->mode == CRYPTO_MODE_AES_CCM || + tctx->mode == CRYPTO_MODE_SM4_CCM) { + iv_to_context = SET_IV_IN_SRCBUF; + dstoff = ((uint32_t)(req->assoclen + ivaadsize)); + + } + } + + /* Calculate and set the below, important parameters + * spacc proc_len - spacc_proc_len + * pre-AAD size - spacc_pre_aad_size + */ + if (tctx->mode == CRYPTO_MODE_AES_CCM || + tctx->mode == CRYPTO_MODE_SM4_CCM || + tctx->mode == CRYPTO_MODE_AES_CCM_RFC4309 || + tctx->mode == CRYPTO_MODE_SM4_CCM_RFC8998) { + spacc_proc_len = B0len + ccm_aad_16b_len + + req->cryptlen + ivaadsize + - icvremove; + spacc_pre_aad_size = B0len + ccm_aad_16b_len + + ivaadsize + ptaadsize; + + } else { + spacc_proc_len = B0len + req->assoclen + + req->cryptlen - icvremove + + ivaadsize; + spacc_pre_aad_size = B0len + req->assoclen + + ivaadsize + ptaadsize; + } + + rc = spacc_set_operation(&priv->spacc, + ctx->cb.new_handle, + encrypt ? 
OP_ENCRYPT : OP_DECRYPT, + ICV_ENCRYPT_HASH, IP_ICV_APPEND, + spacc_icv_offset, + tctx->auth_size, 0); + + rc = spacc_packet_enqueue_ddt(&priv->spacc, ctx->cb.new_handle, + &ctx->src, + (req->dst == req->src) ? &ctx->src : + &ctx->dst, spacc_proc_len, + (dstoff << SPACC_OFFSET_DST_O) | + SPACC_MAX_IV_SIZE, + spacc_pre_aad_size, + 0, iv_to_context, 0); + + if (rc < 0) { + spacc_aead_cleanup_dma(tctx->dev, req); + spacc_close(&priv->spacc, ctx->cb.new_handle); + + if (rc != -EBUSY) { + dev_err(tctx->dev, " failed to enqueue job, ERR: %d\n", + rc); + } + + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + + return -EINVAL; + } + + /* At this point the job is in flight to the engine ... remove first use + * so subsequent calls don't expand the key again... ideally we would + * pump a dummy job through the engine to pre-expand the key so that by + * the time setkey was done we wouldn't have to do this + */ + priv->spacc.job[tctx->handle].first_use = 0; + priv->spacc.job[tctx->handle].ctrl &= ~(1UL + << priv->spacc.config.ctrl_map[SPACC_CTRL_KEY_EXP]); + + return -EINPROGRESS; +} + +static int spacc_aead_encrypt(struct aead_request *req) +{ + return spacc_aead_process(req, 0ULL, 1); +} + +static int spacc_aead_decrypt(struct aead_request *req) +{ + return spacc_aead_process(req, 0ULL, 0); +} + +static int spacc_aead_init(struct crypto_aead *tfm) +{ + struct spacc_crypto_ctx *ctx = crypto_aead_ctx(tfm); + const struct spacc_alg *salg = spacc_tfm_aead(&tfm->base); + + crypto_aead_set_reqsize(tfm, sizeof(struct spacc_crypto_reqctx)); + + ctx->zero_key = 0; + ctx->fb.aead = NULL; + ctx->handle = -1; + ctx->mode = salg->mode->aead.ciph; + ctx->dev = get_device(salg->dev[0]); + + return 0; +} + +static void spacc_aead_exit(struct crypto_aead *tfm) +{ + struct spacc_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct spacc_priv *priv = dev_get_drvdata(ctx->dev); + + ctx->fb.aead = NULL; + /* close spacc handle */ + if (ctx->handle >= 0) { + 
spacc_close(&priv->spacc, ctx->handle); + ctx->handle = -1; + } + + put_device(ctx->dev); +} + +static struct aead_alg spacc_aead_algs = { + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .init = spacc_aead_init, + .exit = spacc_aead_exit, + + .base.cra_priority = 300, + .base.cra_module = THIS_MODULE, + .base.cra_ctxsize = sizeof(struct spacc_crypto_ctx), + .base.cra_flags = CRYPTO_ALG_TYPE_AEAD + | CRYPTO_ALG_ASYNC + | CRYPTO_ALG_NEED_FALLBACK + | CRYPTO_ALG_KERN_DRIVER_ONLY + | CRYPTO_ALG_OPTIONAL_KEY +}; + +static int spacc_register_aead(unsigned int aead_mode, + struct platform_device *spacc_pdev) +{ + int rc; + struct spacc_alg *salg; + + salg = kmalloc(sizeof(*salg), GFP_KERNEL); + if (!salg) + return -ENOMEM; + + salg->mode = &possible_aeads[aead_mode]; + salg->dev[0] = &spacc_pdev->dev; + salg->dev[1] = NULL; + salg->calg = &salg->alg.aead.base; + salg->alg.aead = spacc_aead_algs; + + spacc_init_aead_alg(salg->calg, salg->mode); + + salg->alg.aead.ivsize = salg->mode->ivlen; + salg->alg.aead.maxauthsize = salg->mode->hashlen; + salg->alg.aead.base.cra_blocksize = salg->mode->blocklen; + + salg->keylen_mask = possible_aeads[aead_mode].keylen_mask; + + if (salg->mode->aead.ciph & SPACC_MANGLE_IV_FLAG) { + switch (salg->mode->aead.ciph & 0x7F00) { + case SPACC_MANGLE_IV_RFC3686: /*CTR*/ + case SPACC_MANGLE_IV_RFC4106: /*GCM*/ + case SPACC_MANGLE_IV_RFC4543: /*GMAC*/ + case SPACC_MANGLE_IV_RFC4309: /*CCM*/ + case SPACC_MANGLE_IV_RFC8998: /*GCM/CCM*/ + salg->alg.aead.ivsize = 12; + break; + } + } + + rc = crypto_register_aead(&salg->alg.aead); + if (rc < 0) { + kfree(salg); + return rc; + } + + dev_dbg(salg->dev[0], "Registered %s\n", salg->mode->name); + + mutex_lock(&spacc_aead_alg_mutex); + list_add(&salg->list, &spacc_aead_alg_list); + mutex_unlock(&spacc_aead_alg_mutex); + + return 0; +} + +int probe_aeads(struct platform_device *spacc_pdev) +{ + int err; + unsigned 
int x, y; + struct spacc_priv *priv = NULL; + + size_t alloc_size = max_t(unsigned long, + roundup_pow_of_two(sizeof(struct spacc_iv_buf)), + dma_get_cache_alignment()); + + spacc_iv_pool = kmem_cache_create("spacc-aead-iv", alloc_size, + alloc_size, 0, NULL); + + if (!spacc_iv_pool) + return -ENOMEM; + + for (x = 0; x < ARRAY_SIZE(possible_aeads); x++) { + possible_aeads[x].keylen_mask = 0; + possible_aeads[x].valid = 0; + } + + /* compute cipher key masks (over all devices) */ + priv = dev_get_drvdata(&spacc_pdev->dev); + + for (x = 0; x < ARRAY_SIZE(possible_aeads); x++) { + for (y = 0; y < ARRAY_SIZE(possible_aeads[x].keylen); y++) { + if (spacc_isenabled(&priv->spacc, + possible_aeads[x].aead.ciph & 0xFF, + possible_aeads[x].keylen[y])) + possible_aeads[x].keylen_mask |= 1u << y; + } + } + + /* scan for combined modes */ + priv = dev_get_drvdata(&spacc_pdev->dev); + + for (x = 0; x < ARRAY_SIZE(possible_aeads); x++) { + if (!possible_aeads[x].valid && possible_aeads[x].keylen_mask) { + if (spacc_isenabled(&priv->spacc, + possible_aeads[x].aead.hash & 0xFF, + possible_aeads[x].hashlen)) { + + possible_aeads[x].valid = 1; + err = spacc_register_aead(x, spacc_pdev); + if (err < 0) + goto error; + } + } + } + + return 0; + +error: + return err; +} + +int spacc_unregister_aead_algs(void) +{ + struct spacc_alg *salg, *tmp; + + mutex_lock(&spacc_aead_alg_mutex); + + list_for_each_entry_safe(salg, tmp, &spacc_aead_alg_list, list) { + crypto_unregister_alg(salg->calg); + list_del(&salg->list); + kfree(salg); + } + + mutex_unlock(&spacc_aead_alg_mutex); + + kmem_cache_destroy(spacc_iv_pool); + + return 0; +} From cb67c924b2a7b561bd7f4f2bd66766337c1007b7 Mon Sep 17 00:00:00 2001 From: Pavitrakumar M Date: Mon, 29 Jul 2024 09:43:49 +0530 Subject: [PATCH 15/96] crypto: spacc - Add SPAcc Kconfig and Makefile Signed-off-by: shwetar Signed-off-by: Pavitrakumar M Acked-by: Ruud Derwig Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/Kconfig | 95 
+++++++++++++++++++++++++++++++ drivers/crypto/dwc-spacc/Makefile | 16 ++++++ 2 files changed, 111 insertions(+) create mode 100644 drivers/crypto/dwc-spacc/Kconfig create mode 100644 drivers/crypto/dwc-spacc/Makefile diff --git a/drivers/crypto/dwc-spacc/Kconfig b/drivers/crypto/dwc-spacc/Kconfig new file mode 100644 index 000000000000..9eb41a295f9d --- /dev/null +++ b/drivers/crypto/dwc-spacc/Kconfig @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_DEV_SPACC + tristate "Support for dw_spacc Security protocol accelerators" + depends on HAS_DMA + default m + + help + This enables support for the HASH/CRYP/AEAD hw accelerator which can be found + on dw_spacc IP. + +config CRYPTO_DEV_SPACC_CIPHER + bool "Enable CIPHER functionality" + depends on CRYPTO_DEV_SPACC + default y + select CRYPTO_SKCIPHER + select CRYPTO_LIB_DES + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_ECB + select CRYPTO_CTR + select CRYPTO_XTS + select CRYPTO_CTS + select CRYPTO_OFB + select CRYPTO_CFB + select CRYPTO_SM4_GENERIC + select CRYPTO_CHACHA20 + + help + Say y to enable Cipher functionality of SPACC. + +config CRYPTO_DEV_SPACC_HASH + bool "Enable HASH functionality" + depends on CRYPTO_DEV_SPACC + default y + select CRYPTO_HASH + select CRYPTO_SHA1 + select CRYPTO_MD5 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + select CRYPTO_HMAC + select CRYPTO_SM3 + select CRYPTO_CMAC + select CRYPTO_MICHAEL_MIC + select CRYPTO_XCBC + select CRYPTO_AES + select CRYPTO_SM4_GENERIC + + help + Say y to enable Hash functionality of SPACC. + +config CRYPTO_DEV_SPACC_AEAD + bool "Enable AEAD functionality" + depends on CRYPTO_DEV_SPACC + default y + select CRYPTO_AEAD + select CRYPTO_AUTHENC + select CRYPTO_AES + select CRYPTO_SM4_GENERIC + select CRYPTO_CHACHAPOLY1305 + select CRYPTO_GCM + select CRYPTO_CCM + + help + Say y to enable AEAD functionality of SPACC. 
+ +config CRYPTO_DEV_SPACC_AUTODETECT + bool "Enable Autodetect functionality" + depends on CRYPTO_DEV_SPACC + default y + help + Say y to enable Autodetect functionality. + +config CRYPTO_DEV_SPACC_DEBUG_TRACE_IO + bool "Enable Trace MMIO reads/writes stats" + depends on CRYPTO_DEV_SPACC + default n + help + Say y to enable Trace MMIO reads/writes stats. + To debug and trace IO register read/write operation. + +config CRYPTO_DEV_SPACC_DEBUG_TRACE_DDT + bool "Enable Trace DDT entries stats" + default n + depends on CRYPTO_DEV_SPACC + help + Say y to enable Trace DDT entries stats. + To debug and trace DDT operation. + +config CRYPTO_DEV_SPACC_SECURE_MODE + bool "Enable Spacc secure mode stats" + default n + depends on CRYPTO_DEV_SPACC + help + Say y to enable Spacc secure mode stats. diff --git a/drivers/crypto/dwc-spacc/Makefile b/drivers/crypto/dwc-spacc/Makefile new file mode 100644 index 000000000000..bf46c8e13a31 --- /dev/null +++ b/drivers/crypto/dwc-spacc/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CRYPTO_DEV_SPACC) += snps-spacc.o +snps-spacc-objs = spacc_hal.o spacc_core.o \ +spacc_manager.o spacc_interrupt.o spacc_device.o + +ifeq ($(CONFIG_CRYPTO_DEV_SPACC_HASH),y) +snps-spacc-objs += spacc_ahash.o +endif + +ifeq ($(CONFIG_CRYPTO_DEV_SPACC_CIPHER),y) +snps-spacc-objs += spacc_skcipher.o +endif + +ifeq ($(CONFIG_CRYPTO_DEV_SPACC_AEAD),y) +snps-spacc-objs += spacc_aead.o +endif From fc61c658c94cb7405ca6946d8f2a2b71cef49845 Mon Sep 17 00:00:00 2001 From: Pavitrakumar M Date: Mon, 29 Jul 2024 09:43:50 +0530 Subject: [PATCH 16/96] crypto: spacc - Enable Driver compilation in crypto Kconfig and Makefile Signed-off-by: Bhoomika K Signed-off-by: Pavitrakumar M Acked-by: Ruud Derwig Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 + drivers/crypto/Makefile | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 94f23c6fc93b..009cbd0e1993 100644 ---
a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -696,6 +696,7 @@ config CRYPTO_DEV_BCM_SPU ahash, and aead algorithms with the kernel cryptographic API. source "drivers/crypto/stm32/Kconfig" +source "drivers/crypto/dwc-spacc/Kconfig" config CRYPTO_DEV_SAFEXCEL tristate "Inside Secure's SafeXcel cryptographic engine driver" diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index ad4ccef67d12..a937e8f5849b 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ obj-y += xilinx/ +obj-y += dwc-spacc/ obj-y += hisilicon/ obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ obj-y += intel/ From 9d3a7ff2ce1781a77ad6f8896e1256875c17631e Mon Sep 17 00:00:00 2001 From: Francesco Dolcini Date: Mon, 29 Jul 2024 13:36:40 +0200 Subject: [PATCH 17/96] hwrng: cn10k - Enable by default CN10K driver if Thunder SoC is enabled Before commit addea5858b66 ("hwrng: Kconfig - Do not enable by default CN10K driver") the Marvell CN10K Random Number Generator was always enabled when HW_RANDOM was enabled. This was changed with that commit to prevent having this driver being always enabled on arm64. To prevent introducing regression with some old defconfig enable the driver when ARCH_THUNDER is enabled. 
Fixes: addea5858b66 ("hwrng: Kconfig - Do not enable by default CN10K driver") Closes: https://lore.kernel.org/all/SN7PR18MB53144B37B82ADEEC5D35AE0CE3AC2@SN7PR18MB5314.namprd18.prod.outlook.com/ Signed-off-by: Francesco Dolcini Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 01e2e1ef82cf..ae5f3a01f554 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -555,6 +555,7 @@ config HW_RANDOM_ARM_SMCCC_TRNG config HW_RANDOM_CN10K tristate "Marvell CN10K Random Number Generator support" depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) + default HW_RANDOM if ARCH_THUNDER help This driver provides support for the True Random Number generator available in Marvell CN10K SoCs. From f5903f50070518b6722559d25744f8a8dbe5011e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 30 Jul 2024 11:41:55 +0800 Subject: [PATCH 18/96] crypto: caam/qi* - Use cpumask_var_t instead of cpumask_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch cpumask_t to cpumask_var_t as the former may be too big for the stack: CC [M] drivers/crypto/caam/qi.o CC [M] drivers/crypto/caam/caamalg_qi2.o ../drivers/crypto/caam/qi.c: In function ‘caam_qi_init’: ../drivers/crypto/caam/qi.c:808:1: warning: the frame size of 1056 bytes is larger than 1024 bytes [-Wframe-larger-than=] 808 | } | ^ CHECK ../drivers/crypto/caam/qi.c ../drivers/crypto/caam/caamalg_qi2.c: In function ‘dpaa2_dpseci_setup’: ../drivers/crypto/caam/caamalg_qi2.c:5135:1: warning: the frame size of 1032 bytes is larger than 1024 bytes [-Wframe-larger-than=] 5135 | } | ^ Also fix the error path handling in qi.c. 
Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 17 ++++++++++++----- drivers/crypto/caam/qi.c | 31 ++++++++++++++++++++----------- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 207dc422785a..44e1f8f46967 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -5006,10 +5006,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) struct device *dev = &ls_dev->dev; struct dpaa2_caam_priv *priv; struct dpaa2_caam_priv_per_cpu *ppriv; - cpumask_t clean_mask; + cpumask_var_t clean_mask; int err, cpu; u8 i; + err = -ENOMEM; + if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL)) + goto err_cpumask; + priv = dev_get_drvdata(dev); priv->dev = dev; @@ -5085,7 +5089,6 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) } } - cpumask_clear(&clean_mask); i = 0; for_each_online_cpu(cpu) { u8 j; @@ -5114,7 +5117,7 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) err = -ENOMEM; goto err_alloc_netdev; } - cpumask_set_cpu(cpu, &clean_mask); + cpumask_set_cpu(cpu, clean_mask); ppriv->net_dev->dev = *dev; netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi, @@ -5122,15 +5125,19 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) DPAA2_CAAM_NAPI_WEIGHT); } - return 0; + err = 0; + goto free_cpumask; err_alloc_netdev: - free_dpaa2_pcpu_netdev(priv, &clean_mask); + free_dpaa2_pcpu_netdev(priv, clean_mask); err_get_rx_queue: dpaa2_dpseci_congestion_free(priv); err_get_vers: dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); err_open: +free_cpumask: + free_cpumask_var(clean_mask); +err_cpumask: return err; } diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index ba8fb5d8a7b2..f6111ee9ed34 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -736,7 +736,11 @@ int caam_qi_init(struct platform_device *caam_pdev) struct device *ctrldev = 
&caam_pdev->dev, *qidev; struct caam_drv_private *ctrlpriv; const cpumask_t *cpus = qman_affine_cpus(); - cpumask_t clean_mask; + cpumask_var_t clean_mask; + + err = -ENOMEM; + if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL)) + goto fail_cpumask; ctrlpriv = dev_get_drvdata(ctrldev); qidev = ctrldev; @@ -745,19 +749,16 @@ int caam_qi_init(struct platform_device *caam_pdev) err = init_cgr(qidev); if (err) { dev_err(qidev, "CGR initialization failed: %d\n", err); - return err; + goto fail_cgr; } /* Initialise response FQs */ err = alloc_rsp_fqs(qidev); if (err) { dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); - free_rsp_fqs(); - return err; + goto fail_fqs; } - cpumask_clear(&clean_mask); - /* * Enable the NAPI contexts on each of the core which has an affine * portal. @@ -773,7 +774,7 @@ int caam_qi_init(struct platform_device *caam_pdev) err = -ENOMEM; goto fail; } - cpumask_set_cpu(i, &clean_mask); + cpumask_set_cpu(i, clean_mask); priv->net_dev = net_dev; net_dev->dev = *qidev; @@ -788,7 +789,7 @@ int caam_qi_init(struct platform_device *caam_pdev) if (!qi_cache) { dev_err(qidev, "Can't allocate CAAM cache\n"); err = -ENOMEM; - goto fail2; + goto fail; } caam_debugfs_qi_init(ctrlpriv); @@ -798,11 +799,19 @@ int caam_qi_init(struct platform_device *caam_pdev) goto fail2; dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); - return 0; + goto free_cpumask; fail2: - free_rsp_fqs(); + kmem_cache_destroy(qi_cache); fail: - free_caam_qi_pcpu_netdev(&clean_mask); + free_caam_qi_pcpu_netdev(clean_mask); +fail_fqs: + free_rsp_fqs(); + qman_delete_cgr_safe(&qipriv.cgr); + qman_release_cgrid(qipriv.cgr.cgrid); +fail_cgr: +free_cpumask: + free_cpumask_var(clean_mask); +fail_cpumask: return err; } From 16fd38ab651ecebf1ff3d637f437f17e88cdc777 Mon Sep 17 00:00:00 2001 From: Aurelien Jarno Date: Tue, 30 Jul 2024 17:08:19 +0100 Subject: [PATCH 19/96] dt-bindings: rng: Add Rockchip RK3568 TRNG Add the True Random Number Generator on the Rockchip RK3568 
SoC. Signed-off-by: Aurelien Jarno Signed-off-by: Daniel Golle Reviewed-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- .../bindings/rng/rockchip,rk3568-rng.yaml | 61 +++++++++++++++++++ MAINTAINERS | 6 ++ 2 files changed, 67 insertions(+) create mode 100644 Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml diff --git a/Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml b/Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml new file mode 100644 index 000000000000..e0595814a6d9 --- /dev/null +++ b/Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rng/rockchip,rk3568-rng.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip RK3568 TRNG + +description: True Random Number Generator on Rockchip RK3568 SoC + +maintainers: + - Aurelien Jarno + - Daniel Golle + +properties: + compatible: + enum: + - rockchip,rk3568-rng + + reg: + maxItems: 1 + + clocks: + items: + - description: TRNG clock + - description: TRNG AHB clock + + clock-names: + items: + - const: core + - const: ahb + + resets: + maxItems: 1 + +required: + - compatible + - reg + - clocks + - clock-names + - resets + +additionalProperties: false + +examples: + - | + #include + bus { + #address-cells = <2>; + #size-cells = <2>; + + rng@fe388000 { + compatible = "rockchip,rk3568-rng"; + reg = <0x0 0xfe388000 0x0 0x4000>; + clocks = <&cru CLK_TRNG_NS>, <&cru HCLK_TRNG_NS>; + clock-names = "core", "ahb"; + resets = <&cru SRST_TRNG_NS>; + }; + }; + +... 
diff --git a/MAINTAINERS b/MAINTAINERS index 42decde38320..0448c7eb3934 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -19722,6 +19722,12 @@ F: Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst F: drivers/media/platform/rockchip/rkisp1 F: include/uapi/linux/rkisp1-config.h +ROCKCHIP RK3568 RANDOM NUMBER GENERATOR SUPPORT +M: Daniel Golle +M: Aurelien Jarno +S: Maintained +F: Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml + ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER M: Jacob Chen M: Ezequiel Garcia From dcf4fef6631c302f9bdd188979fe3172e47a29c7 Mon Sep 17 00:00:00 2001 From: Aurelien Jarno Date: Tue, 30 Jul 2024 17:11:04 +0100 Subject: [PATCH 20/96] hwrng: rockchip - add hwrng driver for Rockchip RK3568 SoC Rockchip SoCs used to have a random number generator as part of their crypto device, and support for it has to be added to the corresponding driver. However newer Rockchip SoCs like the RK3568 have an independent True Random Number Generator device. This patch adds a driver for it, greatly inspired from the downstream driver. The TRNG device does not seem to have a signal conditionner and the FIPS 140-2 test returns a lot of failures. They can be reduced by increasing RK_RNG_SAMPLE_CNT, in a tradeoff between quality and speed. This value has been adjusted to get ~90% of successes and the quality value has been set accordingly. 
Signed-off-by: Aurelien Jarno [daniel@makrotpia.org: code style fixes] Signed-off-by: Daniel Golle Acked-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- MAINTAINERS | 1 + drivers/char/hw_random/Kconfig | 14 ++ drivers/char/hw_random/Makefile | 1 + drivers/char/hw_random/rockchip-rng.c | 227 ++++++++++++++++++++++++++ 4 files changed, 243 insertions(+) create mode 100644 drivers/char/hw_random/rockchip-rng.c diff --git a/MAINTAINERS b/MAINTAINERS index 0448c7eb3934..a0b4c1210e92 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -19727,6 +19727,7 @@ M: Daniel Golle M: Aurelien Jarno S: Maintained F: Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml +F: drivers/char/hw_random/rockchip-rng.c ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER M: Jacob Chen diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index ae5f3a01f554..b51d9e243f35 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -573,6 +573,20 @@ config HW_RANDOM_JH7110 To compile this driver as a module, choose M here. The module will be called jh7110-trng. +config HW_RANDOM_ROCKCHIP + tristate "Rockchip True Random Number Generator" + depends on HW_RANDOM && (ARCH_ROCKCHIP || COMPILE_TEST) + depends on HAS_IOMEM + default HW_RANDOM + help + This driver provides kernel-side support for the True Random Number + Generator hardware found on some Rockchip SoC like RK3566 or RK3568. + + To compile this driver as a module, choose M here: the + module will be called rockchip-rng. + + If unsure, say Y. 
+ endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 32549a1186dc..01f012eab440 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -48,4 +48,5 @@ obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o +obj-$(CONFIG_HW_RANDOM_ROCKCHIP) += rockchip-rng.o obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c new file mode 100644 index 000000000000..548e2f4d1490 --- /dev/null +++ b/drivers/char/hw_random/rockchip-rng.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * rockchip-rng.c True Random Number Generator driver for Rockchip RK3568 SoC + * + * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd. + * Copyright (c) 2022, Aurelien Jarno + * Authors: + * Lin Jinhan + * Aurelien Jarno + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RK_RNG_AUTOSUSPEND_DELAY 100 +#define RK_RNG_MAX_BYTE 32 +#define RK_RNG_POLL_PERIOD_US 100 +#define RK_RNG_POLL_TIMEOUT_US 10000 + +/* + * TRNG collects osc ring output bit every RK_RNG_SAMPLE_CNT time. The value is + * a tradeoff between speed and quality and has been adjusted to get a quality + * of ~900 (~87.5% of FIPS 140-2 successes). 
+ */ +#define RK_RNG_SAMPLE_CNT 1000 + +/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */ +#define TRNG_RST_CTL 0x0004 +#define TRNG_RNG_CTL 0x0400 +#define TRNG_RNG_CTL_LEN_64_BIT (0x00 << 4) +#define TRNG_RNG_CTL_LEN_128_BIT (0x01 << 4) +#define TRNG_RNG_CTL_LEN_192_BIT (0x02 << 4) +#define TRNG_RNG_CTL_LEN_256_BIT (0x03 << 4) +#define TRNG_RNG_CTL_OSC_RING_SPEED_0 (0x00 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_1 (0x01 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_2 (0x02 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_3 (0x03 << 2) +#define TRNG_RNG_CTL_MASK GENMASK(15, 0) +#define TRNG_RNG_CTL_ENABLE BIT(1) +#define TRNG_RNG_CTL_START BIT(0) +#define TRNG_RNG_SAMPLE_CNT 0x0404 +#define TRNG_RNG_DOUT 0x0410 + +struct rk_rng { + struct hwrng rng; + void __iomem *base; + struct reset_control *rst; + int clk_num; + struct clk_bulk_data *clk_bulks; +}; + +/* The mask in the upper 16 bits determines the bits that are updated */ +static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask) +{ + writel((mask << 16) | val, rng->base + TRNG_RNG_CTL); +} + +static int rk_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + int ret; + + /* start clocks */ + ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks); + if (ret < 0) { + dev_err((struct device *) rk_rng->rng.priv, + "Failed to enable clks %d\n", ret); + return ret; + } + + /* set the sample period */ + writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT); + + /* set osc ring speed and enable it */ + rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT | + TRNG_RNG_CTL_OSC_RING_SPEED_0 | + TRNG_RNG_CTL_ENABLE, + TRNG_RNG_CTL_MASK); + + return 0; +} + +static void rk_rng_cleanup(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + /* stop TRNG */ + rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK); + + /* stop clocks */ + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); +} + +static int 
rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE); + u32 reg; + int ret = 0; + + ret = pm_runtime_resume_and_get((struct device *) rk_rng->rng.priv); + if (ret < 0) + return ret; + + /* Start collecting random data */ + rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START); + + ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg, + !(reg & TRNG_RNG_CTL_START), + RK_RNG_POLL_PERIOD_US, + RK_RNG_POLL_TIMEOUT_US); + if (ret < 0) + goto out; + + /* Read random data stored in the registers */ + memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read); +out: + pm_runtime_mark_last_busy((struct device *) rk_rng->rng.priv); + pm_runtime_put_sync_autosuspend((struct device *) rk_rng->rng.priv); + + return (ret < 0) ? ret : to_read; +} + +static int rk_rng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rk_rng *rk_rng; + int ret; + + rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL); + if (!rk_rng) + return -ENOMEM; + + rk_rng->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rk_rng->base)) + return PTR_ERR(rk_rng->base); + + rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks); + if (rk_rng->clk_num < 0) + return dev_err_probe(dev, rk_rng->clk_num, + "Failed to get clks property\n"); + + rk_rng->rst = devm_reset_control_array_get_exclusive(&pdev->dev); + if (IS_ERR(rk_rng->rst)) + return dev_err_probe(dev, PTR_ERR(rk_rng->rst), + "Failed to get reset property\n"); + + reset_control_assert(rk_rng->rst); + udelay(2); + reset_control_deassert(rk_rng->rst); + + platform_set_drvdata(pdev, rk_rng); + + rk_rng->rng.name = dev_driver_string(dev); + if (!IS_ENABLED(CONFIG_PM)) { + rk_rng->rng.init = rk_rng_init; + rk_rng->rng.cleanup = rk_rng_cleanup; + } + rk_rng->rng.read = rk_rng_read; + rk_rng->rng.priv = (unsigned long) dev; + rk_rng->rng.quality = 900; + + 
pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + devm_pm_runtime_enable(dev); + + ret = devm_hwrng_register(dev, &rk_rng->rng); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to register Rockchip hwrng\n"); + + return 0; +} + +static int __maybe_unused rk_rng_runtime_suspend(struct device *dev) +{ + struct rk_rng *rk_rng = dev_get_drvdata(dev); + + rk_rng_cleanup(&rk_rng->rng); + + return 0; +} + +static int __maybe_unused rk_rng_runtime_resume(struct device *dev) +{ + struct rk_rng *rk_rng = dev_get_drvdata(dev); + + return rk_rng_init(&rk_rng->rng); +} + +static const struct dev_pm_ops rk_rng_pm_ops = { + SET_RUNTIME_PM_OPS(rk_rng_runtime_suspend, + rk_rng_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +static const struct of_device_id rk_rng_dt_match[] = { + { .compatible = "rockchip,rk3568-rng", }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, rk_rng_dt_match); + +static struct platform_driver rk_rng_driver = { + .driver = { + .name = "rockchip-rng", + .pm = &rk_rng_pm_ops, + .of_match_table = rk_rng_dt_match, + }, + .probe = rk_rng_probe, +}; + +module_platform_driver(rk_rng_driver); + +MODULE_DESCRIPTION("Rockchip RK3568 True Random Number Generator driver"); +MODULE_AUTHOR("Lin Jinhan "); +MODULE_AUTHOR("Aurelien Jarno "); +MODULE_AUTHOR("Daniel Golle "); +MODULE_LICENSE("GPL"); From da4fe6815aca25603944a64b0965310512e867d0 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 2 Aug 2024 14:09:00 +0800 Subject: [PATCH 21/96] Revert "lib/mpi: Introduce ec implementation to MPI library" This reverts commit d58bb7e55a8a65894cc02f27c3e2bf9403e7c40f. It's no longer needed since sm2 has been removed. 
Signed-off-by: Herbert Xu --- include/linux/mpi.h | 105 --- lib/crypto/mpi/Makefile | 1 - lib/crypto/mpi/ec.c | 1507 --------------------------------------- 3 files changed, 1613 deletions(-) delete mode 100644 lib/crypto/mpi/ec.c diff --git a/include/linux/mpi.h b/include/linux/mpi.h index eb0d1c1db208..89b720893e12 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -157,111 +157,6 @@ void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor); /*-- mpi-inv.c --*/ int mpi_invm(MPI x, MPI a, MPI n); -/*-- ec.c --*/ - -/* Object to represent a point in projective coordinates */ -struct gcry_mpi_point { - MPI x; - MPI y; - MPI z; -}; - -typedef struct gcry_mpi_point *MPI_POINT; - -/* Models describing an elliptic curve */ -enum gcry_mpi_ec_models { - /* The Short Weierstrass equation is - * y^2 = x^3 + ax + b - */ - MPI_EC_WEIERSTRASS = 0, - /* The Montgomery equation is - * by^2 = x^3 + ax^2 + x - */ - MPI_EC_MONTGOMERY, - /* The Twisted Edwards equation is - * ax^2 + y^2 = 1 + bx^2y^2 - * Note that we use 'b' instead of the commonly used 'd'. - */ - MPI_EC_EDWARDS -}; - -/* Dialects used with elliptic curves */ -enum ecc_dialects { - ECC_DIALECT_STANDARD = 0, - ECC_DIALECT_ED25519, - ECC_DIALECT_SAFECURVE -}; - -/* This context is used with all our EC functions. */ -struct mpi_ec_ctx { - enum gcry_mpi_ec_models model; /* The model describing this curve. */ - enum ecc_dialects dialect; /* The ECC dialect used with the curve. */ - int flags; /* Public key flags (not always used). */ - unsigned int nbits; /* Number of bits. */ - - /* Domain parameters. Note that they may not all be set and if set - * the MPIs may be flagged as constant. - */ - MPI p; /* Prime specifying the field GF(p). */ - MPI a; /* First coefficient of the Weierstrass equation. */ - MPI b; /* Second coefficient of the Weierstrass equation. */ - MPI_POINT G; /* Base point (generator). */ - MPI n; /* Order of G. */ - unsigned int h; /* Cofactor. */ - - /* The actual key. May not be set. 
*/ - MPI_POINT Q; /* Public key. */ - MPI d; /* Private key. */ - - const char *name; /* Name of the curve. */ - - /* This structure is private to mpi/ec.c! */ - struct { - struct { - unsigned int a_is_pminus3:1; - unsigned int two_inv_p:1; - } valid; /* Flags to help setting the helper vars below. */ - - int a_is_pminus3; /* True if A = P - 3. */ - - MPI two_inv_p; - - mpi_barrett_t p_barrett; - - /* Scratch variables. */ - MPI scratch[11]; - - /* Helper for fast reduction. */ - /* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */ - /* MPI s[10]; */ - /* MPI c; */ - } t; - - /* Curve specific computation routines for the field. */ - void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx); - void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec); - void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx); - void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx); - void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx); -}; - -void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model, - enum ecc_dialects dialect, - int flags, MPI p, MPI a, MPI b); -void mpi_ec_deinit(struct mpi_ec_ctx *ctx); -MPI_POINT mpi_point_new(unsigned int nbits); -void mpi_point_release(MPI_POINT p); -void mpi_point_init(MPI_POINT p); -void mpi_point_free_parts(MPI_POINT p); -int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx); -void mpi_ec_add_points(MPI_POINT result, - MPI_POINT p1, MPI_POINT p2, - struct mpi_ec_ctx *ctx); -void mpi_ec_mul_point(MPI_POINT result, - MPI scalar, MPI_POINT point, - struct mpi_ec_ctx *ctx); -int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx); - /* inline functions */ /** diff --git a/lib/crypto/mpi/Makefile b/lib/crypto/mpi/Makefile index 6e6ef9a34fe1..477debd7ed50 100644 --- a/lib/crypto/mpi/Makefile +++ b/lib/crypto/mpi/Makefile @@ -13,7 +13,6 @@ mpi-y = \ generic_mpih-rshift.o \ generic_mpih-sub1.o \ generic_mpih-add1.o \ - ec.o \ mpicoder.o \ mpi-add.o \ mpi-bit.o \ diff --git 
a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c deleted file mode 100644 index 4781f00982ef..000000000000 --- a/lib/crypto/mpi/ec.c +++ /dev/null @@ -1,1507 +0,0 @@ -/* ec.c - Elliptic Curve functions - * Copyright (C) 2007 Free Software Foundation, Inc. - * Copyright (C) 2013 g10 Code GmbH - * - * This file is part of Libgcrypt. - * - * Libgcrypt is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of - * the License, or (at your option) any later version. - * - * Libgcrypt is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, see . - */ - -#include "mpi-internal.h" -#include "longlong.h" - -#define point_init(a) mpi_point_init((a)) -#define point_free(a) mpi_point_free_parts((a)) - -#define log_error(fmt, ...) pr_err(fmt, ##__VA_ARGS__) -#define log_fatal(fmt, ...) pr_err(fmt, ##__VA_ARGS__) - -#define DIM(v) (sizeof(v)/sizeof((v)[0])) - - -/* Create a new point option. NBITS gives the size in bits of one - * coordinate; it is only used to pre-allocate some resources and - * might also be passed as 0 to use a default value. - */ -MPI_POINT mpi_point_new(unsigned int nbits) -{ - MPI_POINT p; - - (void)nbits; /* Currently not used. */ - - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (p) - mpi_point_init(p); - return p; -} -EXPORT_SYMBOL_GPL(mpi_point_new); - -/* Release the point object P. P may be NULL. */ -void mpi_point_release(MPI_POINT p) -{ - if (p) { - mpi_point_free_parts(p); - kfree(p); - } -} -EXPORT_SYMBOL_GPL(mpi_point_release); - -/* Initialize the fields of a point object. 
gcry_mpi_point_free_parts - * may be used to release the fields. - */ -void mpi_point_init(MPI_POINT p) -{ - p->x = mpi_new(0); - p->y = mpi_new(0); - p->z = mpi_new(0); -} -EXPORT_SYMBOL_GPL(mpi_point_init); - -/* Release the parts of a point object. */ -void mpi_point_free_parts(MPI_POINT p) -{ - mpi_free(p->x); p->x = NULL; - mpi_free(p->y); p->y = NULL; - mpi_free(p->z); p->z = NULL; -} -EXPORT_SYMBOL_GPL(mpi_point_free_parts); - -/* Set the value from S into D. */ -static void point_set(MPI_POINT d, MPI_POINT s) -{ - mpi_set(d->x, s->x); - mpi_set(d->y, s->y); - mpi_set(d->z, s->z); -} - -static void point_resize(MPI_POINT p, struct mpi_ec_ctx *ctx) -{ - size_t nlimbs = ctx->p->nlimbs; - - mpi_resize(p->x, nlimbs); - p->x->nlimbs = nlimbs; - mpi_resize(p->z, nlimbs); - p->z->nlimbs = nlimbs; - - if (ctx->model != MPI_EC_MONTGOMERY) { - mpi_resize(p->y, nlimbs); - p->y->nlimbs = nlimbs; - } -} - -static void point_swap_cond(MPI_POINT d, MPI_POINT s, unsigned long swap, - struct mpi_ec_ctx *ctx) -{ - mpi_swap_cond(d->x, s->x, swap); - if (ctx->model != MPI_EC_MONTGOMERY) - mpi_swap_cond(d->y, s->y, swap); - mpi_swap_cond(d->z, s->z, swap); -} - - -/* W = W mod P. */ -static void ec_mod(MPI w, struct mpi_ec_ctx *ec) -{ - if (ec->t.p_barrett) - mpi_mod_barrett(w, w, ec->t.p_barrett); - else - mpi_mod(w, w, ec->p); -} - -static void ec_addm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx) -{ - mpi_add(w, u, v); - ec_mod(w, ctx); -} - -static void ec_subm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec) -{ - mpi_sub(w, u, v); - while (w->sign) - mpi_add(w, w, ec->p); - /*ec_mod(w, ec);*/ -} - -static void ec_mulm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx) -{ - mpi_mul(w, u, v); - ec_mod(w, ctx); -} - -/* W = 2 * U mod P. 
*/ -static void ec_mul2(MPI w, MPI u, struct mpi_ec_ctx *ctx) -{ - mpi_lshift(w, u, 1); - ec_mod(w, ctx); -} - -static void ec_powm(MPI w, const MPI b, const MPI e, - struct mpi_ec_ctx *ctx) -{ - mpi_powm(w, b, e, ctx->p); - /* mpi_abs(w); */ -} - -/* Shortcut for - * ec_powm(B, B, mpi_const(MPI_C_TWO), ctx); - * for easier optimization. - */ -static void ec_pow2(MPI w, const MPI b, struct mpi_ec_ctx *ctx) -{ - /* Using mpi_mul is slightly faster (at least on amd64). */ - /* mpi_powm(w, b, mpi_const(MPI_C_TWO), ctx->p); */ - ec_mulm(w, b, b, ctx); -} - -/* Shortcut for - * ec_powm(B, B, mpi_const(MPI_C_THREE), ctx); - * for easier optimization. - */ -static void ec_pow3(MPI w, const MPI b, struct mpi_ec_ctx *ctx) -{ - mpi_powm(w, b, mpi_const(MPI_C_THREE), ctx->p); -} - -static void ec_invm(MPI x, MPI a, struct mpi_ec_ctx *ctx) -{ - if (!mpi_invm(x, a, ctx->p)) - log_error("ec_invm: inverse does not exist:\n"); -} - -static void mpih_set_cond(mpi_ptr_t wp, mpi_ptr_t up, - mpi_size_t usize, unsigned long set) -{ - mpi_size_t i; - mpi_limb_t mask = ((mpi_limb_t)0) - set; - mpi_limb_t x; - - for (i = 0; i < usize; i++) { - x = mask & (wp[i] ^ up[i]); - wp[i] = wp[i] ^ x; - } -} - -/* Routines for 2^255 - 19. 
*/ - -#define LIMB_SIZE_25519 ((256+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB) - -static void ec_addm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx) -{ - mpi_ptr_t wp, up, vp; - mpi_size_t wsize = LIMB_SIZE_25519; - mpi_limb_t n[LIMB_SIZE_25519]; - mpi_limb_t borrow; - - if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize) - log_bug("addm_25519: different sizes\n"); - - memset(n, 0, sizeof(n)); - up = u->d; - vp = v->d; - wp = w->d; - - mpihelp_add_n(wp, up, vp, wsize); - borrow = mpihelp_sub_n(wp, wp, ctx->p->d, wsize); - mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL)); - mpihelp_add_n(wp, wp, n, wsize); - wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB)); -} - -static void ec_subm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx) -{ - mpi_ptr_t wp, up, vp; - mpi_size_t wsize = LIMB_SIZE_25519; - mpi_limb_t n[LIMB_SIZE_25519]; - mpi_limb_t borrow; - - if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize) - log_bug("subm_25519: different sizes\n"); - - memset(n, 0, sizeof(n)); - up = u->d; - vp = v->d; - wp = w->d; - - borrow = mpihelp_sub_n(wp, up, vp, wsize); - mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL)); - mpihelp_add_n(wp, wp, n, wsize); - wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB)); -} - -static void ec_mulm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx) -{ - mpi_ptr_t wp, up, vp; - mpi_size_t wsize = LIMB_SIZE_25519; - mpi_limb_t n[LIMB_SIZE_25519*2]; - mpi_limb_t m[LIMB_SIZE_25519+1]; - mpi_limb_t cy; - int msb; - - (void)ctx; - if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize) - log_bug("mulm_25519: different sizes\n"); - - up = u->d; - vp = v->d; - wp = w->d; - - mpihelp_mul_n(n, up, vp, wsize); - memcpy(wp, n, wsize * BYTES_PER_MPI_LIMB); - wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB)); - - memcpy(m, n+LIMB_SIZE_25519-1, (wsize+1) * BYTES_PER_MPI_LIMB); - mpihelp_rshift(m, m, LIMB_SIZE_25519+1, (255 % 
BITS_PER_MPI_LIMB)); - - memcpy(n, m, wsize * BYTES_PER_MPI_LIMB); - cy = mpihelp_lshift(m, m, LIMB_SIZE_25519, 4); - m[LIMB_SIZE_25519] = cy; - cy = mpihelp_add_n(m, m, n, wsize); - m[LIMB_SIZE_25519] += cy; - cy = mpihelp_add_n(m, m, n, wsize); - m[LIMB_SIZE_25519] += cy; - cy = mpihelp_add_n(m, m, n, wsize); - m[LIMB_SIZE_25519] += cy; - - cy = mpihelp_add_n(wp, wp, m, wsize); - m[LIMB_SIZE_25519] += cy; - - memset(m, 0, wsize * BYTES_PER_MPI_LIMB); - msb = (wp[LIMB_SIZE_25519-1] >> (255 % BITS_PER_MPI_LIMB)); - m[0] = (m[LIMB_SIZE_25519] * 2 + msb) * 19; - wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB)); - mpihelp_add_n(wp, wp, m, wsize); - - m[0] = 0; - cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize); - mpih_set_cond(m, ctx->p->d, wsize, (cy != 0UL)); - mpihelp_add_n(wp, wp, m, wsize); -} - -static void ec_mul2_25519(MPI w, MPI u, struct mpi_ec_ctx *ctx) -{ - ec_addm_25519(w, u, u, ctx); -} - -static void ec_pow2_25519(MPI w, const MPI b, struct mpi_ec_ctx *ctx) -{ - ec_mulm_25519(w, b, b, ctx); -} - -/* Routines for 2^448 - 2^224 - 1. 
*/ - -#define LIMB_SIZE_448 ((448+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB) -#define LIMB_SIZE_HALF_448 ((LIMB_SIZE_448+1)/2) - -static void ec_addm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx) -{ - mpi_ptr_t wp, up, vp; - mpi_size_t wsize = LIMB_SIZE_448; - mpi_limb_t n[LIMB_SIZE_448]; - mpi_limb_t cy; - - if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize) - log_bug("addm_448: different sizes\n"); - - memset(n, 0, sizeof(n)); - up = u->d; - vp = v->d; - wp = w->d; - - cy = mpihelp_add_n(wp, up, vp, wsize); - mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL)); - mpihelp_sub_n(wp, wp, n, wsize); -} - -static void ec_subm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx) -{ - mpi_ptr_t wp, up, vp; - mpi_size_t wsize = LIMB_SIZE_448; - mpi_limb_t n[LIMB_SIZE_448]; - mpi_limb_t borrow; - - if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize) - log_bug("subm_448: different sizes\n"); - - memset(n, 0, sizeof(n)); - up = u->d; - vp = v->d; - wp = w->d; - - borrow = mpihelp_sub_n(wp, up, vp, wsize); - mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL)); - mpihelp_add_n(wp, wp, n, wsize); -} - -static void ec_mulm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx) -{ - mpi_ptr_t wp, up, vp; - mpi_size_t wsize = LIMB_SIZE_448; - mpi_limb_t n[LIMB_SIZE_448*2]; - mpi_limb_t a2[LIMB_SIZE_HALF_448]; - mpi_limb_t a3[LIMB_SIZE_HALF_448]; - mpi_limb_t b0[LIMB_SIZE_HALF_448]; - mpi_limb_t b1[LIMB_SIZE_HALF_448]; - mpi_limb_t cy; - int i; -#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2) - mpi_limb_t b1_rest, a3_rest; -#endif - - if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize) - log_bug("mulm_448: different sizes\n"); - - up = u->d; - vp = v->d; - wp = w->d; - - mpihelp_mul_n(n, up, vp, wsize); - - for (i = 0; i < (wsize + 1) / 2; i++) { - b0[i] = n[i]; - b1[i] = n[i+wsize/2]; - a2[i] = n[i+wsize]; - a3[i] = n[i+wsize+wsize/2]; - } - -#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2) - b0[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1; - 
a2[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1; - - b1_rest = 0; - a3_rest = 0; - - for (i = (wsize + 1) / 2 - 1; i >= 0; i--) { - mpi_limb_t b1v, a3v; - b1v = b1[i]; - a3v = a3[i]; - b1[i] = (b1_rest << 32) | (b1v >> 32); - a3[i] = (a3_rest << 32) | (a3v >> 32); - b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1); - a3_rest = a3v & (((mpi_limb_t)1UL << 32)-1); - } -#endif - - cy = mpihelp_add_n(b0, b0, a2, LIMB_SIZE_HALF_448); - cy += mpihelp_add_n(b0, b0, a3, LIMB_SIZE_HALF_448); - for (i = 0; i < (wsize + 1) / 2; i++) - wp[i] = b0[i]; -#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2) - wp[LIMB_SIZE_HALF_448-1] &= (((mpi_limb_t)1UL << 32)-1); -#endif - -#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2) - cy = b0[LIMB_SIZE_HALF_448-1] >> 32; -#endif - - cy = mpihelp_add_1(b1, b1, LIMB_SIZE_HALF_448, cy); - cy += mpihelp_add_n(b1, b1, a2, LIMB_SIZE_HALF_448); - cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448); - cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448); -#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2) - b1_rest = 0; - for (i = (wsize + 1) / 2 - 1; i >= 0; i--) { - mpi_limb_t b1v = b1[i]; - b1[i] = (b1_rest << 32) | (b1v >> 32); - b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1); - } - wp[LIMB_SIZE_HALF_448-1] |= (b1_rest << 32); -#endif - for (i = 0; i < wsize / 2; i++) - wp[i+(wsize + 1) / 2] = b1[i]; - -#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2) - cy = b1[LIMB_SIZE_HALF_448-1]; -#endif - - memset(n, 0, wsize * BYTES_PER_MPI_LIMB); - -#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2) - n[LIMB_SIZE_HALF_448-1] = cy << 32; -#else - n[LIMB_SIZE_HALF_448] = cy; -#endif - n[0] = cy; - mpihelp_add_n(wp, wp, n, wsize); - - memset(n, 0, wsize * BYTES_PER_MPI_LIMB); - cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize); - mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL)); - mpihelp_add_n(wp, wp, n, wsize); -} - -static void ec_mul2_448(MPI w, MPI u, struct mpi_ec_ctx *ctx) -{ - ec_addm_448(w, u, u, ctx); -} - -static void ec_pow2_448(MPI w, const MPI b, struct mpi_ec_ctx *ctx) -{ - ec_mulm_448(w, 
b, b, ctx); -} - -struct field_table { - const char *p; - - /* computation routines for the field. */ - void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx); - void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx); - void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx); - void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx); - void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx); -}; - -static const struct field_table field_table[] = { - { - "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED", - ec_addm_25519, - ec_subm_25519, - ec_mulm_25519, - ec_mul2_25519, - ec_pow2_25519 - }, - { - "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE" - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", - ec_addm_448, - ec_subm_448, - ec_mulm_448, - ec_mul2_448, - ec_pow2_448 - }, - { NULL, NULL, NULL, NULL, NULL, NULL }, -}; - -/* Force recomputation of all helper variables. */ -static void mpi_ec_get_reset(struct mpi_ec_ctx *ec) -{ - ec->t.valid.a_is_pminus3 = 0; - ec->t.valid.two_inv_p = 0; -} - -/* Accessor for helper variable. */ -static int ec_get_a_is_pminus3(struct mpi_ec_ctx *ec) -{ - MPI tmp; - - if (!ec->t.valid.a_is_pminus3) { - ec->t.valid.a_is_pminus3 = 1; - tmp = mpi_alloc_like(ec->p); - mpi_sub_ui(tmp, ec->p, 3); - ec->t.a_is_pminus3 = !mpi_cmp(ec->a, tmp); - mpi_free(tmp); - } - - return ec->t.a_is_pminus3; -} - -/* Accessor for helper variable. 
*/ -static MPI ec_get_two_inv_p(struct mpi_ec_ctx *ec) -{ - if (!ec->t.valid.two_inv_p) { - ec->t.valid.two_inv_p = 1; - if (!ec->t.two_inv_p) - ec->t.two_inv_p = mpi_alloc(0); - ec_invm(ec->t.two_inv_p, mpi_const(MPI_C_TWO), ec); - } - return ec->t.two_inv_p; -} - -static const char *const curve25519_bad_points[] = { - "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x00b8495f16056286fdb1329ceb8d09da6ac49ff1fae35616aeb8413b7c7aebe0", - "0x57119fd0dd4e22d8868e1c58c45c44045bef839c55b1d0b1248c50a3bc959c5f", - "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec", - "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee", - NULL -}; - -static const char *const curve448_bad_points[] = { - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe" - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0x00000000000000000000000000000000000000000000000000000000" - "00000000000000000000000000000000000000000000000000000000", - "0x00000000000000000000000000000000000000000000000000000000" - "00000000000000000000000000000000000000000000000000000001", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe" - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffe", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffff" - "00000000000000000000000000000000000000000000000000000000", - NULL -}; - -static const char *const *bad_points_table[] = { - curve25519_bad_points, - curve448_bad_points, -}; - -static void mpi_ec_coefficient_normalize(MPI a, MPI p) -{ - if (a->sign) { - mpi_resize(a, p->nlimbs); - mpihelp_sub_n(a->d, p->d, a->d, p->nlimbs); - a->nlimbs = p->nlimbs; - a->sign = 0; - } -} - -/* This function initialized a context for elliptic curve based on the - * field GF(p). P is the prime specifying this field, A is the first - * coefficient. 
CTX is expected to be zeroized. - */ -void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model, - enum ecc_dialects dialect, - int flags, MPI p, MPI a, MPI b) -{ - int i; - static int use_barrett = -1 /* TODO: 1 or -1 */; - - mpi_ec_coefficient_normalize(a, p); - mpi_ec_coefficient_normalize(b, p); - - /* Fixme: Do we want to check some constraints? e.g. a < p */ - - ctx->model = model; - ctx->dialect = dialect; - ctx->flags = flags; - if (dialect == ECC_DIALECT_ED25519) - ctx->nbits = 256; - else - ctx->nbits = mpi_get_nbits(p); - ctx->p = mpi_copy(p); - ctx->a = mpi_copy(a); - ctx->b = mpi_copy(b); - - ctx->d = NULL; - ctx->t.two_inv_p = NULL; - - ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL; - - mpi_ec_get_reset(ctx); - - if (model == MPI_EC_MONTGOMERY) { - for (i = 0; i < DIM(bad_points_table); i++) { - MPI p_candidate = mpi_scanval(bad_points_table[i][0]); - int match_p = !mpi_cmp(ctx->p, p_candidate); - int j; - - mpi_free(p_candidate); - if (!match_p) - continue; - - for (j = 0; i < DIM(ctx->t.scratch) && bad_points_table[i][j]; j++) - ctx->t.scratch[j] = mpi_scanval(bad_points_table[i][j]); - } - } else { - /* Allocate scratch variables. 
*/ - for (i = 0; i < DIM(ctx->t.scratch); i++) - ctx->t.scratch[i] = mpi_alloc_like(ctx->p); - } - - ctx->addm = ec_addm; - ctx->subm = ec_subm; - ctx->mulm = ec_mulm; - ctx->mul2 = ec_mul2; - ctx->pow2 = ec_pow2; - - for (i = 0; field_table[i].p; i++) { - MPI f_p; - - f_p = mpi_scanval(field_table[i].p); - if (!f_p) - break; - - if (!mpi_cmp(p, f_p)) { - ctx->addm = field_table[i].addm; - ctx->subm = field_table[i].subm; - ctx->mulm = field_table[i].mulm; - ctx->mul2 = field_table[i].mul2; - ctx->pow2 = field_table[i].pow2; - mpi_free(f_p); - - mpi_resize(ctx->a, ctx->p->nlimbs); - ctx->a->nlimbs = ctx->p->nlimbs; - - mpi_resize(ctx->b, ctx->p->nlimbs); - ctx->b->nlimbs = ctx->p->nlimbs; - - for (i = 0; i < DIM(ctx->t.scratch) && ctx->t.scratch[i]; i++) - ctx->t.scratch[i]->nlimbs = ctx->p->nlimbs; - - break; - } - - mpi_free(f_p); - } -} -EXPORT_SYMBOL_GPL(mpi_ec_init); - -void mpi_ec_deinit(struct mpi_ec_ctx *ctx) -{ - int i; - - mpi_barrett_free(ctx->t.p_barrett); - - /* Domain parameter. */ - mpi_free(ctx->p); - mpi_free(ctx->a); - mpi_free(ctx->b); - mpi_point_release(ctx->G); - mpi_free(ctx->n); - - /* The key. */ - mpi_point_release(ctx->Q); - mpi_free(ctx->d); - - /* Private data of ec.c. */ - mpi_free(ctx->t.two_inv_p); - - for (i = 0; i < DIM(ctx->t.scratch); i++) - mpi_free(ctx->t.scratch[i]); -} -EXPORT_SYMBOL_GPL(mpi_ec_deinit); - -/* Compute the affine coordinates from the projective coordinates in - * POINT. Set them into X and Y. If one coordinate is not required, - * X or Y may be passed as NULL. CTX is the usual context. Returns: 0 - * on success or !0 if POINT is at infinity. - */ -int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx) -{ - if (!mpi_cmp_ui(point->z, 0)) - return -1; - - switch (ctx->model) { - case MPI_EC_WEIERSTRASS: /* Using Jacobian coordinates. 
*/ - { - MPI z1, z2, z3; - - z1 = mpi_new(0); - z2 = mpi_new(0); - ec_invm(z1, point->z, ctx); /* z1 = z^(-1) mod p */ - ec_mulm(z2, z1, z1, ctx); /* z2 = z^(-2) mod p */ - - if (x) - ec_mulm(x, point->x, z2, ctx); - - if (y) { - z3 = mpi_new(0); - ec_mulm(z3, z2, z1, ctx); /* z3 = z^(-3) mod p */ - ec_mulm(y, point->y, z3, ctx); - mpi_free(z3); - } - - mpi_free(z2); - mpi_free(z1); - } - return 0; - - case MPI_EC_MONTGOMERY: - { - if (x) - mpi_set(x, point->x); - - if (y) { - log_fatal("%s: Getting Y-coordinate on %s is not supported\n", - "mpi_ec_get_affine", "Montgomery"); - return -1; - } - } - return 0; - - case MPI_EC_EDWARDS: - { - MPI z; - - z = mpi_new(0); - ec_invm(z, point->z, ctx); - - mpi_resize(z, ctx->p->nlimbs); - z->nlimbs = ctx->p->nlimbs; - - if (x) { - mpi_resize(x, ctx->p->nlimbs); - x->nlimbs = ctx->p->nlimbs; - ctx->mulm(x, point->x, z, ctx); - } - if (y) { - mpi_resize(y, ctx->p->nlimbs); - y->nlimbs = ctx->p->nlimbs; - ctx->mulm(y, point->y, z, ctx); - } - - mpi_free(z); - } - return 0; - - default: - return -1; - } -} -EXPORT_SYMBOL_GPL(mpi_ec_get_affine); - -/* RESULT = 2 * POINT (Weierstrass version). */ -static void dup_point_weierstrass(MPI_POINT result, - MPI_POINT point, struct mpi_ec_ctx *ctx) -{ -#define x3 (result->x) -#define y3 (result->y) -#define z3 (result->z) -#define t1 (ctx->t.scratch[0]) -#define t2 (ctx->t.scratch[1]) -#define t3 (ctx->t.scratch[2]) -#define l1 (ctx->t.scratch[3]) -#define l2 (ctx->t.scratch[4]) -#define l3 (ctx->t.scratch[5]) - - if (!mpi_cmp_ui(point->y, 0) || !mpi_cmp_ui(point->z, 0)) { - /* P_y == 0 || P_z == 0 => [1:1:0] */ - mpi_set_ui(x3, 1); - mpi_set_ui(y3, 1); - mpi_set_ui(z3, 0); - } else { - if (ec_get_a_is_pminus3(ctx)) { - /* Use the faster case. */ - /* L1 = 3(X - Z^2)(X + Z^2) */ - /* T1: used for Z^2. */ - /* T2: used for the right term. 
*/ - ec_pow2(t1, point->z, ctx); - ec_subm(l1, point->x, t1, ctx); - ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx); - ec_addm(t2, point->x, t1, ctx); - ec_mulm(l1, l1, t2, ctx); - } else { - /* Standard case. */ - /* L1 = 3X^2 + aZ^4 */ - /* T1: used for aZ^4. */ - ec_pow2(l1, point->x, ctx); - ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx); - ec_powm(t1, point->z, mpi_const(MPI_C_FOUR), ctx); - ec_mulm(t1, t1, ctx->a, ctx); - ec_addm(l1, l1, t1, ctx); - } - /* Z3 = 2YZ */ - ec_mulm(z3, point->y, point->z, ctx); - ec_mul2(z3, z3, ctx); - - /* L2 = 4XY^2 */ - /* T2: used for Y2; required later. */ - ec_pow2(t2, point->y, ctx); - ec_mulm(l2, t2, point->x, ctx); - ec_mulm(l2, l2, mpi_const(MPI_C_FOUR), ctx); - - /* X3 = L1^2 - 2L2 */ - /* T1: used for L2^2. */ - ec_pow2(x3, l1, ctx); - ec_mul2(t1, l2, ctx); - ec_subm(x3, x3, t1, ctx); - - /* L3 = 8Y^4 */ - /* T2: taken from above. */ - ec_pow2(t2, t2, ctx); - ec_mulm(l3, t2, mpi_const(MPI_C_EIGHT), ctx); - - /* Y3 = L1(L2 - X3) - L3 */ - ec_subm(y3, l2, x3, ctx); - ec_mulm(y3, y3, l1, ctx); - ec_subm(y3, y3, l3, ctx); - } - -#undef x3 -#undef y3 -#undef z3 -#undef t1 -#undef t2 -#undef t3 -#undef l1 -#undef l2 -#undef l3 -} - -/* RESULT = 2 * POINT (Montgomery version). */ -static void dup_point_montgomery(MPI_POINT result, - MPI_POINT point, struct mpi_ec_ctx *ctx) -{ - (void)result; - (void)point; - (void)ctx; - log_fatal("%s: %s not yet supported\n", - "mpi_ec_dup_point", "Montgomery"); -} - -/* RESULT = 2 * POINT (Twisted Edwards version). 
*/ -static void dup_point_edwards(MPI_POINT result, - MPI_POINT point, struct mpi_ec_ctx *ctx) -{ -#define X1 (point->x) -#define Y1 (point->y) -#define Z1 (point->z) -#define X3 (result->x) -#define Y3 (result->y) -#define Z3 (result->z) -#define B (ctx->t.scratch[0]) -#define C (ctx->t.scratch[1]) -#define D (ctx->t.scratch[2]) -#define E (ctx->t.scratch[3]) -#define F (ctx->t.scratch[4]) -#define H (ctx->t.scratch[5]) -#define J (ctx->t.scratch[6]) - - /* Compute: (X_3 : Y_3 : Z_3) = 2( X_1 : Y_1 : Z_1 ) */ - - /* B = (X_1 + Y_1)^2 */ - ctx->addm(B, X1, Y1, ctx); - ctx->pow2(B, B, ctx); - - /* C = X_1^2 */ - /* D = Y_1^2 */ - ctx->pow2(C, X1, ctx); - ctx->pow2(D, Y1, ctx); - - /* E = aC */ - if (ctx->dialect == ECC_DIALECT_ED25519) - ctx->subm(E, ctx->p, C, ctx); - else - ctx->mulm(E, ctx->a, C, ctx); - - /* F = E + D */ - ctx->addm(F, E, D, ctx); - - /* H = Z_1^2 */ - ctx->pow2(H, Z1, ctx); - - /* J = F - 2H */ - ctx->mul2(J, H, ctx); - ctx->subm(J, F, J, ctx); - - /* X_3 = (B - C - D) · J */ - ctx->subm(X3, B, C, ctx); - ctx->subm(X3, X3, D, ctx); - ctx->mulm(X3, X3, J, ctx); - - /* Y_3 = F · (E - D) */ - ctx->subm(Y3, E, D, ctx); - ctx->mulm(Y3, Y3, F, ctx); - - /* Z_3 = F · J */ - ctx->mulm(Z3, F, J, ctx); - -#undef X1 -#undef Y1 -#undef Z1 -#undef X3 -#undef Y3 -#undef Z3 -#undef B -#undef C -#undef D -#undef E -#undef F -#undef H -#undef J -} - -/* RESULT = 2 * POINT */ -static void -mpi_ec_dup_point(MPI_POINT result, MPI_POINT point, struct mpi_ec_ctx *ctx) -{ - switch (ctx->model) { - case MPI_EC_WEIERSTRASS: - dup_point_weierstrass(result, point, ctx); - break; - case MPI_EC_MONTGOMERY: - dup_point_montgomery(result, point, ctx); - break; - case MPI_EC_EDWARDS: - dup_point_edwards(result, point, ctx); - break; - } -} - -/* RESULT = P1 + P2 (Weierstrass version).*/ -static void add_points_weierstrass(MPI_POINT result, - MPI_POINT p1, MPI_POINT p2, - struct mpi_ec_ctx *ctx) -{ -#define x1 (p1->x) -#define y1 (p1->y) -#define z1 (p1->z) -#define x2 (p2->x) 
-#define y2 (p2->y) -#define z2 (p2->z) -#define x3 (result->x) -#define y3 (result->y) -#define z3 (result->z) -#define l1 (ctx->t.scratch[0]) -#define l2 (ctx->t.scratch[1]) -#define l3 (ctx->t.scratch[2]) -#define l4 (ctx->t.scratch[3]) -#define l5 (ctx->t.scratch[4]) -#define l6 (ctx->t.scratch[5]) -#define l7 (ctx->t.scratch[6]) -#define l8 (ctx->t.scratch[7]) -#define l9 (ctx->t.scratch[8]) -#define t1 (ctx->t.scratch[9]) -#define t2 (ctx->t.scratch[10]) - - if ((!mpi_cmp(x1, x2)) && (!mpi_cmp(y1, y2)) && (!mpi_cmp(z1, z2))) { - /* Same point; need to call the duplicate function. */ - mpi_ec_dup_point(result, p1, ctx); - } else if (!mpi_cmp_ui(z1, 0)) { - /* P1 is at infinity. */ - mpi_set(x3, p2->x); - mpi_set(y3, p2->y); - mpi_set(z3, p2->z); - } else if (!mpi_cmp_ui(z2, 0)) { - /* P2 is at infinity. */ - mpi_set(x3, p1->x); - mpi_set(y3, p1->y); - mpi_set(z3, p1->z); - } else { - int z1_is_one = !mpi_cmp_ui(z1, 1); - int z2_is_one = !mpi_cmp_ui(z2, 1); - - /* l1 = x1 z2^2 */ - /* l2 = x2 z1^2 */ - if (z2_is_one) - mpi_set(l1, x1); - else { - ec_pow2(l1, z2, ctx); - ec_mulm(l1, l1, x1, ctx); - } - if (z1_is_one) - mpi_set(l2, x2); - else { - ec_pow2(l2, z1, ctx); - ec_mulm(l2, l2, x2, ctx); - } - /* l3 = l1 - l2 */ - ec_subm(l3, l1, l2, ctx); - /* l4 = y1 z2^3 */ - ec_powm(l4, z2, mpi_const(MPI_C_THREE), ctx); - ec_mulm(l4, l4, y1, ctx); - /* l5 = y2 z1^3 */ - ec_powm(l5, z1, mpi_const(MPI_C_THREE), ctx); - ec_mulm(l5, l5, y2, ctx); - /* l6 = l4 - l5 */ - ec_subm(l6, l4, l5, ctx); - - if (!mpi_cmp_ui(l3, 0)) { - if (!mpi_cmp_ui(l6, 0)) { - /* P1 and P2 are the same - use duplicate function. */ - mpi_ec_dup_point(result, p1, ctx); - } else { - /* P1 is the inverse of P2. 
*/ - mpi_set_ui(x3, 1); - mpi_set_ui(y3, 1); - mpi_set_ui(z3, 0); - } - } else { - /* l7 = l1 + l2 */ - ec_addm(l7, l1, l2, ctx); - /* l8 = l4 + l5 */ - ec_addm(l8, l4, l5, ctx); - /* z3 = z1 z2 l3 */ - ec_mulm(z3, z1, z2, ctx); - ec_mulm(z3, z3, l3, ctx); - /* x3 = l6^2 - l7 l3^2 */ - ec_pow2(t1, l6, ctx); - ec_pow2(t2, l3, ctx); - ec_mulm(t2, t2, l7, ctx); - ec_subm(x3, t1, t2, ctx); - /* l9 = l7 l3^2 - 2 x3 */ - ec_mul2(t1, x3, ctx); - ec_subm(l9, t2, t1, ctx); - /* y3 = (l9 l6 - l8 l3^3)/2 */ - ec_mulm(l9, l9, l6, ctx); - ec_powm(t1, l3, mpi_const(MPI_C_THREE), ctx); /* fixme: Use saved value*/ - ec_mulm(t1, t1, l8, ctx); - ec_subm(y3, l9, t1, ctx); - ec_mulm(y3, y3, ec_get_two_inv_p(ctx), ctx); - } - } - -#undef x1 -#undef y1 -#undef z1 -#undef x2 -#undef y2 -#undef z2 -#undef x3 -#undef y3 -#undef z3 -#undef l1 -#undef l2 -#undef l3 -#undef l4 -#undef l5 -#undef l6 -#undef l7 -#undef l8 -#undef l9 -#undef t1 -#undef t2 -} - -/* RESULT = P1 + P2 (Montgomery version).*/ -static void add_points_montgomery(MPI_POINT result, - MPI_POINT p1, MPI_POINT p2, - struct mpi_ec_ctx *ctx) -{ - (void)result; - (void)p1; - (void)p2; - (void)ctx; - log_fatal("%s: %s not yet supported\n", - "mpi_ec_add_points", "Montgomery"); -} - -/* RESULT = P1 + P2 (Twisted Edwards version).*/ -static void add_points_edwards(MPI_POINT result, - MPI_POINT p1, MPI_POINT p2, - struct mpi_ec_ctx *ctx) -{ -#define X1 (p1->x) -#define Y1 (p1->y) -#define Z1 (p1->z) -#define X2 (p2->x) -#define Y2 (p2->y) -#define Z2 (p2->z) -#define X3 (result->x) -#define Y3 (result->y) -#define Z3 (result->z) -#define A (ctx->t.scratch[0]) -#define B (ctx->t.scratch[1]) -#define C (ctx->t.scratch[2]) -#define D (ctx->t.scratch[3]) -#define E (ctx->t.scratch[4]) -#define F (ctx->t.scratch[5]) -#define G (ctx->t.scratch[6]) -#define tmp (ctx->t.scratch[7]) - - point_resize(result, ctx); - - /* Compute: (X_3 : Y_3 : Z_3) = (X_1 : Y_1 : Z_1) + (X_2 : Y_2 : Z_3) */ - - /* A = Z1 · Z2 */ - ctx->mulm(A, Z1, Z2, ctx); 
- - /* B = A^2 */ - ctx->pow2(B, A, ctx); - - /* C = X1 · X2 */ - ctx->mulm(C, X1, X2, ctx); - - /* D = Y1 · Y2 */ - ctx->mulm(D, Y1, Y2, ctx); - - /* E = d · C · D */ - ctx->mulm(E, ctx->b, C, ctx); - ctx->mulm(E, E, D, ctx); - - /* F = B - E */ - ctx->subm(F, B, E, ctx); - - /* G = B + E */ - ctx->addm(G, B, E, ctx); - - /* X_3 = A · F · ((X_1 + Y_1) · (X_2 + Y_2) - C - D) */ - ctx->addm(tmp, X1, Y1, ctx); - ctx->addm(X3, X2, Y2, ctx); - ctx->mulm(X3, X3, tmp, ctx); - ctx->subm(X3, X3, C, ctx); - ctx->subm(X3, X3, D, ctx); - ctx->mulm(X3, X3, F, ctx); - ctx->mulm(X3, X3, A, ctx); - - /* Y_3 = A · G · (D - aC) */ - if (ctx->dialect == ECC_DIALECT_ED25519) { - ctx->addm(Y3, D, C, ctx); - } else { - ctx->mulm(Y3, ctx->a, C, ctx); - ctx->subm(Y3, D, Y3, ctx); - } - ctx->mulm(Y3, Y3, G, ctx); - ctx->mulm(Y3, Y3, A, ctx); - - /* Z_3 = F · G */ - ctx->mulm(Z3, F, G, ctx); - - -#undef X1 -#undef Y1 -#undef Z1 -#undef X2 -#undef Y2 -#undef Z2 -#undef X3 -#undef Y3 -#undef Z3 -#undef A -#undef B -#undef C -#undef D -#undef E -#undef F -#undef G -#undef tmp -} - -/* Compute a step of Montgomery Ladder (only use X and Z in the point). - * Inputs: P1, P2, and x-coordinate of DIF = P1 - P1. - * Outputs: PRD = 2 * P1 and SUM = P1 + P2. 
- */ -static void montgomery_ladder(MPI_POINT prd, MPI_POINT sum, - MPI_POINT p1, MPI_POINT p2, MPI dif_x, - struct mpi_ec_ctx *ctx) -{ - ctx->addm(sum->x, p2->x, p2->z, ctx); - ctx->subm(p2->z, p2->x, p2->z, ctx); - ctx->addm(prd->x, p1->x, p1->z, ctx); - ctx->subm(p1->z, p1->x, p1->z, ctx); - ctx->mulm(p2->x, p1->z, sum->x, ctx); - ctx->mulm(p2->z, prd->x, p2->z, ctx); - ctx->pow2(p1->x, prd->x, ctx); - ctx->pow2(p1->z, p1->z, ctx); - ctx->addm(sum->x, p2->x, p2->z, ctx); - ctx->subm(p2->z, p2->x, p2->z, ctx); - ctx->mulm(prd->x, p1->x, p1->z, ctx); - ctx->subm(p1->z, p1->x, p1->z, ctx); - ctx->pow2(sum->x, sum->x, ctx); - ctx->pow2(sum->z, p2->z, ctx); - ctx->mulm(prd->z, p1->z, ctx->a, ctx); /* CTX->A: (a-2)/4 */ - ctx->mulm(sum->z, sum->z, dif_x, ctx); - ctx->addm(prd->z, p1->x, prd->z, ctx); - ctx->mulm(prd->z, prd->z, p1->z, ctx); -} - -/* RESULT = P1 + P2 */ -void mpi_ec_add_points(MPI_POINT result, - MPI_POINT p1, MPI_POINT p2, - struct mpi_ec_ctx *ctx) -{ - switch (ctx->model) { - case MPI_EC_WEIERSTRASS: - add_points_weierstrass(result, p1, p2, ctx); - break; - case MPI_EC_MONTGOMERY: - add_points_montgomery(result, p1, p2, ctx); - break; - case MPI_EC_EDWARDS: - add_points_edwards(result, p1, p2, ctx); - break; - } -} -EXPORT_SYMBOL_GPL(mpi_ec_add_points); - -/* Scalar point multiplication - the main function for ECC. If takes - * an integer SCALAR and a POINT as well as the usual context CTX. - * RESULT will be set to the resulting point. - */ -void mpi_ec_mul_point(MPI_POINT result, - MPI scalar, MPI_POINT point, - struct mpi_ec_ctx *ctx) -{ - MPI x1, y1, z1, k, h, yy; - unsigned int i, loops; - struct gcry_mpi_point p1, p2, p1inv; - - if (ctx->model == MPI_EC_EDWARDS) { - /* Simple left to right binary method. Algorithm 3.27 from - * {author={Hankerson, Darrel and Menezes, Alfred J. 
and Vanstone, Scott}, - * title = {Guide to Elliptic Curve Cryptography}, - * year = {2003}, isbn = {038795273X}, - * url = {http://www.cacr.math.uwaterloo.ca/ecc/}, - * publisher = {Springer-Verlag New York, Inc.}} - */ - unsigned int nbits; - int j; - - if (mpi_cmp(scalar, ctx->p) >= 0) - nbits = mpi_get_nbits(scalar); - else - nbits = mpi_get_nbits(ctx->p); - - mpi_set_ui(result->x, 0); - mpi_set_ui(result->y, 1); - mpi_set_ui(result->z, 1); - point_resize(point, ctx); - - point_resize(result, ctx); - point_resize(point, ctx); - - for (j = nbits-1; j >= 0; j--) { - mpi_ec_dup_point(result, result, ctx); - if (mpi_test_bit(scalar, j)) - mpi_ec_add_points(result, result, point, ctx); - } - return; - } else if (ctx->model == MPI_EC_MONTGOMERY) { - unsigned int nbits; - int j; - struct gcry_mpi_point p1_, p2_; - MPI_POINT q1, q2, prd, sum; - unsigned long sw; - mpi_size_t rsize; - - /* Compute scalar point multiplication with Montgomery Ladder. - * Note that we don't use Y-coordinate in the points at all. - * RESULT->Y will be filled by zero. 
- */ - - nbits = mpi_get_nbits(scalar); - point_init(&p1); - point_init(&p2); - point_init(&p1_); - point_init(&p2_); - mpi_set_ui(p1.x, 1); - mpi_free(p2.x); - p2.x = mpi_copy(point->x); - mpi_set_ui(p2.z, 1); - - point_resize(&p1, ctx); - point_resize(&p2, ctx); - point_resize(&p1_, ctx); - point_resize(&p2_, ctx); - - mpi_resize(point->x, ctx->p->nlimbs); - point->x->nlimbs = ctx->p->nlimbs; - - q1 = &p1; - q2 = &p2; - prd = &p1_; - sum = &p2_; - - for (j = nbits-1; j >= 0; j--) { - sw = mpi_test_bit(scalar, j); - point_swap_cond(q1, q2, sw, ctx); - montgomery_ladder(prd, sum, q1, q2, point->x, ctx); - point_swap_cond(prd, sum, sw, ctx); - swap(q1, prd); - swap(q2, sum); - } - - mpi_clear(result->y); - sw = (nbits & 1); - point_swap_cond(&p1, &p1_, sw, ctx); - - rsize = p1.z->nlimbs; - MPN_NORMALIZE(p1.z->d, rsize); - if (rsize == 0) { - mpi_set_ui(result->x, 1); - mpi_set_ui(result->z, 0); - } else { - z1 = mpi_new(0); - ec_invm(z1, p1.z, ctx); - ec_mulm(result->x, p1.x, z1, ctx); - mpi_set_ui(result->z, 1); - mpi_free(z1); - } - - point_free(&p1); - point_free(&p2); - point_free(&p1_); - point_free(&p2_); - return; - } - - x1 = mpi_alloc_like(ctx->p); - y1 = mpi_alloc_like(ctx->p); - h = mpi_alloc_like(ctx->p); - k = mpi_copy(scalar); - yy = mpi_copy(point->y); - - if (mpi_has_sign(k)) { - k->sign = 0; - ec_invm(yy, yy, ctx); - } - - if (!mpi_cmp_ui(point->z, 1)) { - mpi_set(x1, point->x); - mpi_set(y1, yy); - } else { - MPI z2, z3; - - z2 = mpi_alloc_like(ctx->p); - z3 = mpi_alloc_like(ctx->p); - ec_mulm(z2, point->z, point->z, ctx); - ec_mulm(z3, point->z, z2, ctx); - ec_invm(z2, z2, ctx); - ec_mulm(x1, point->x, z2, ctx); - ec_invm(z3, z3, ctx); - ec_mulm(y1, yy, z3, ctx); - mpi_free(z2); - mpi_free(z3); - } - z1 = mpi_copy(mpi_const(MPI_C_ONE)); - - mpi_mul(h, k, mpi_const(MPI_C_THREE)); /* h = 3k */ - loops = mpi_get_nbits(h); - if (loops < 2) { - /* If SCALAR is zero, the above mpi_mul sets H to zero and thus - * LOOPs will be zero. 
To avoid an underflow of I in the main - * loop we set LOOP to 2 and the result to (0,0,0). - */ - loops = 2; - mpi_clear(result->x); - mpi_clear(result->y); - mpi_clear(result->z); - } else { - mpi_set(result->x, point->x); - mpi_set(result->y, yy); - mpi_set(result->z, point->z); - } - mpi_free(yy); yy = NULL; - - p1.x = x1; x1 = NULL; - p1.y = y1; y1 = NULL; - p1.z = z1; z1 = NULL; - point_init(&p2); - point_init(&p1inv); - - /* Invert point: y = p - y mod p */ - point_set(&p1inv, &p1); - ec_subm(p1inv.y, ctx->p, p1inv.y, ctx); - - for (i = loops-2; i > 0; i--) { - mpi_ec_dup_point(result, result, ctx); - if (mpi_test_bit(h, i) == 1 && mpi_test_bit(k, i) == 0) { - point_set(&p2, result); - mpi_ec_add_points(result, &p2, &p1, ctx); - } - if (mpi_test_bit(h, i) == 0 && mpi_test_bit(k, i) == 1) { - point_set(&p2, result); - mpi_ec_add_points(result, &p2, &p1inv, ctx); - } - } - - point_free(&p1); - point_free(&p2); - point_free(&p1inv); - mpi_free(h); - mpi_free(k); -} -EXPORT_SYMBOL_GPL(mpi_ec_mul_point); - -/* Return true if POINT is on the curve described by CTX. */ -int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx) -{ - int res = 0; - MPI x, y, w; - - x = mpi_new(0); - y = mpi_new(0); - w = mpi_new(0); - - /* Check that the point is in range. This needs to be done here and - * not after conversion to affine coordinates. 
- */ - if (mpi_cmpabs(point->x, ctx->p) >= 0) - goto leave; - if (mpi_cmpabs(point->y, ctx->p) >= 0) - goto leave; - if (mpi_cmpabs(point->z, ctx->p) >= 0) - goto leave; - - switch (ctx->model) { - case MPI_EC_WEIERSTRASS: - { - MPI xxx; - - if (mpi_ec_get_affine(x, y, point, ctx)) - goto leave; - - xxx = mpi_new(0); - - /* y^2 == x^3 + a·x + b */ - ec_pow2(y, y, ctx); - - ec_pow3(xxx, x, ctx); - ec_mulm(w, ctx->a, x, ctx); - ec_addm(w, w, ctx->b, ctx); - ec_addm(w, w, xxx, ctx); - - if (!mpi_cmp(y, w)) - res = 1; - - mpi_free(xxx); - } - break; - - case MPI_EC_MONTGOMERY: - { -#define xx y - /* With Montgomery curve, only X-coordinate is valid. */ - if (mpi_ec_get_affine(x, NULL, point, ctx)) - goto leave; - - /* The equation is: b * y^2 == x^3 + a · x^2 + x */ - /* We check if right hand is quadratic residue or not by - * Euler's criterion. - */ - /* CTX->A has (a-2)/4 and CTX->B has b^-1 */ - ec_mulm(w, ctx->a, mpi_const(MPI_C_FOUR), ctx); - ec_addm(w, w, mpi_const(MPI_C_TWO), ctx); - ec_mulm(w, w, x, ctx); - ec_pow2(xx, x, ctx); - ec_addm(w, w, xx, ctx); - ec_addm(w, w, mpi_const(MPI_C_ONE), ctx); - ec_mulm(w, w, x, ctx); - ec_mulm(w, w, ctx->b, ctx); -#undef xx - /* Compute Euler's criterion: w^(p-1)/2 */ -#define p_minus1 y - ec_subm(p_minus1, ctx->p, mpi_const(MPI_C_ONE), ctx); - mpi_rshift(p_minus1, p_minus1, 1); - ec_powm(w, w, p_minus1, ctx); - - res = !mpi_cmp_ui(w, 1); -#undef p_minus1 - } - break; - - case MPI_EC_EDWARDS: - { - if (mpi_ec_get_affine(x, y, point, ctx)) - goto leave; - - mpi_resize(w, ctx->p->nlimbs); - w->nlimbs = ctx->p->nlimbs; - - /* a · x^2 + y^2 - 1 - b · x^2 · y^2 == 0 */ - ctx->pow2(x, x, ctx); - ctx->pow2(y, y, ctx); - if (ctx->dialect == ECC_DIALECT_ED25519) - ctx->subm(w, ctx->p, x, ctx); - else - ctx->mulm(w, ctx->a, x, ctx); - ctx->addm(w, w, y, ctx); - ctx->mulm(x, x, y, ctx); - ctx->mulm(x, x, ctx->b, ctx); - ctx->subm(w, w, x, ctx); - if (!mpi_cmp_ui(w, 1)) - res = 1; - } - break; - } - -leave: - mpi_free(w); - 
mpi_free(x); - mpi_free(y); - - return res; -} -EXPORT_SYMBOL_GPL(mpi_ec_curve_point); From d57e2f7cffd57fe2800332dec768ec1b67a4159f Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Sat, 3 Aug 2024 14:49:22 +0800 Subject: [PATCH 22/96] hwrng: bcm2835 - Add missing clk_disable_unprepare in bcm2835_rng_init Add the missing clk_disable_unprepare() before return in bcm2835_rng_init(). Fixes: e5f9f41d5e62 ("hwrng: bcm2835 - add reset support") Cc: Signed-off-by: Gaosheng Cui Reviewed-by: Florian Fainelli Signed-off-by: Herbert Xu --- drivers/char/hw_random/bcm2835-rng.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index b03e80300627..aa2b135e3ee2 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng) return ret; ret = reset_control_reset(priv->reset); - if (ret) + if (ret) { + clk_disable_unprepare(priv->clk); return ret; + } if (priv->mask_interrupts) { /* mask the interrupt */ From 4b7acc85de14ee8a2236f54445dc635d47eceac0 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Sat, 3 Aug 2024 14:49:23 +0800 Subject: [PATCH 23/96] hwrng: cctrng - Add missing clk_disable_unprepare in cctrng_resume Add the missing clk_disable_unprepare() before return in cctrng_resume(). 
Fixes: a583ed310bb6 ("hwrng: cctrng - introduce Arm CryptoCell driver") Cc: Signed-off-by: Gaosheng Cui Signed-off-by: Herbert Xu --- drivers/char/hw_random/cctrng.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index c0d2f824769f..4c50efc46483 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -622,6 +622,7 @@ static int __maybe_unused cctrng_resume(struct device *dev) /* wait for Cryptocell reset completion */ if (!cctrng_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); + clk_disable_unprepare(drvdata->clk); return -EBUSY; } From 001412493e74d89166d2441b622eeaea00511bdc Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 5 Aug 2024 11:27:13 -0700 Subject: [PATCH 24/96] crypto: x86/aes-gcm - fix PREEMPT_RT issue in gcm_crypt() On PREEMPT_RT, kfree() takes sleeping locks and must not be called with preemption disabled. Therefore, on PREEMPT_RT skcipher_walk_done() must not be called from within a kernel_fpu_{begin,end}() pair, even when it's the last call which is guaranteed to not allocate memory. Therefore, move the last skcipher_walk_done() in gcm_crypt() to the end of the function so that it goes after the kernel_fpu_end(). To make this work cleanly, rework the data processing loop to handle only non-last data segments. 
Fixes: b06affb1cb58 ("crypto: x86/aes-gcm - add VAES and AVX512 / AVX10 optimized AES-GCM") Reported-by: Sebastian Andrzej Siewior Closes: https://lore.kernel.org/linux-crypto/20240802102333.itejxOsJ@linutronix.de Signed-off-by: Eric Biggers Tested-by: Sebastian Andrzej Siewior Signed-off-by: Herbert Xu --- arch/x86/crypto/aesni-intel_glue.c | 59 ++++++++++++++---------------- 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index cd37de5ec404..d63ba9eaba3e 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -1366,6 +1366,8 @@ gcm_crypt(struct aead_request *req, int flags) err = skcipher_walk_aead_encrypt(&walk, req, false); else err = skcipher_walk_aead_decrypt(&walk, req, false); + if (err) + return err; /* * Since the AES-GCM assembly code requires that at least three assembly @@ -1381,37 +1383,31 @@ gcm_crypt(struct aead_request *req, int flags) gcm_process_assoc(key, ghash_acc, req->src, assoclen, flags); /* En/decrypt the data and pass the ciphertext through GHASH. */ - while ((nbytes = walk.nbytes) != 0) { - if (unlikely(nbytes < walk.total)) { - /* - * Non-last segment. In this case, the assembly - * function requires that the length be a multiple of 16 - * (AES_BLOCK_SIZE) bytes. The needed buffering of up - * to 16 bytes is handled by the skcipher_walk. Here we - * just need to round down to a multiple of 16. - */ - nbytes = round_down(nbytes, AES_BLOCK_SIZE); - aes_gcm_update(key, le_ctr, ghash_acc, - walk.src.virt.addr, walk.dst.virt.addr, - nbytes, flags); - le_ctr[0] += nbytes / AES_BLOCK_SIZE; - kernel_fpu_end(); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - kernel_fpu_begin(); - } else { - /* Last segment: process all remaining data. 
*/ - aes_gcm_update(key, le_ctr, ghash_acc, - walk.src.virt.addr, walk.dst.virt.addr, - nbytes, flags); - err = skcipher_walk_done(&walk, 0); - /* - * The low word of the counter isn't used by the - * finalize, so there's no need to increment it here. - */ - } + while (unlikely((nbytes = walk.nbytes) < walk.total)) { + /* + * Non-last segment. In this case, the assembly function + * requires that the length be a multiple of 16 (AES_BLOCK_SIZE) + * bytes. The needed buffering of up to 16 bytes is handled by + * the skcipher_walk. Here we just need to round down to a + * multiple of 16. + */ + nbytes = round_down(nbytes, AES_BLOCK_SIZE); + aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr, + walk.dst.virt.addr, nbytes, flags); + le_ctr[0] += nbytes / AES_BLOCK_SIZE; + kernel_fpu_end(); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + if (err) + return err; + kernel_fpu_begin(); } - if (err) - goto out; + /* Last segment: process all remaining data. */ + aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr, + walk.dst.virt.addr, nbytes, flags); + /* + * The low word of the counter isn't used by the finalize, so there's no + * need to increment it here. + */ /* Finalize */ taglen = crypto_aead_authsize(tfm); @@ -1439,8 +1435,9 @@ gcm_crypt(struct aead_request *req, int flags) datalen, tag, taglen, flags)) err = -EBADMSG; } -out: kernel_fpu_end(); + if (nbytes) + skcipher_walk_done(&walk, 0); return err; } From 2d6213bd592b4731b53ece3492f9d1d18e97eb5e Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 12 Aug 2024 10:42:35 +1000 Subject: [PATCH 25/96] crypto: spacc - Add ifndef around MIN Fixup for "crypto: spacc - Add SPAcc Skcipher support" interacting with commit 1a251f52cfdc ("minmax: make generic MIN() and MAX() macros available everywhere") from Linus' tree. Signed-off-by: Stephen Rothwell Reintroduced MIN macro with ifndef around it. 
Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_manager.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/dwc-spacc/spacc_manager.c b/drivers/crypto/dwc-spacc/spacc_manager.c index 3b26b27a998f..d42ae499e959 100644 --- a/drivers/crypto/dwc-spacc/spacc_manager.c +++ b/drivers/crypto/dwc-spacc/spacc_manager.c @@ -1,8 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include "spacc_core.h" +#ifndef MIN #define MIN(x, y) (((x) < (y)) ? (x) : (y)) +#endif /* prevent reading past the end of the buffer */ static void read_from_buf(unsigned char *dst, unsigned char *src, From 18e2188c4b2069cf52c12344b47b4f7ac982f555 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 6 Aug 2024 00:11:30 +0200 Subject: [PATCH 26/96] crypto: chacha20poly1305 - Annotate struct chachapoly_ctx with __counted_by() Add the __counted_by compiler attribute to the flexible array member salt to improve access bounds-checking via CONFIG_UBSAN_BOUNDS and CONFIG_FORTIFY_SOURCE. Reviewed-by: Kees Cook Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- crypto/chacha20poly1305.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 9e4651330852..d740849f1c19 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -27,7 +27,7 @@ struct chachapoly_ctx { struct crypto_ahash *poly; /* key bytes we use for the ChaCha20 IV */ unsigned int saltlen; - u8 salt[]; + u8 salt[] __counted_by(saltlen); }; struct poly_req { From 9369693a2c8f4832f6cdc8a94cc63cc7f7f9c96c Mon Sep 17 00:00:00 2001 From: Jia He Date: Tue, 6 Aug 2024 05:54:44 +0000 Subject: [PATCH 27/96] crypto: arm64/poly1305 - move data to rodata section When objtool gains support for ARM in the future, it may encounter issues disassembling the following data in the .text section: > .Lzeros: > .long 0,0,0,0,0,0,0,0 > .asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm" > .align 2 Move it to .rodata which is a more 
appropriate section for read-only data. There is a limit on how far the label can be from the instruction, hence use "adrp" and low 12bits offset of the label to avoid the compilation error. Signed-off-by: Jia He Tested-by: Daniel Gomez Signed-off-by: Herbert Xu --- arch/arm64/crypto/poly1305-armv8.pl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/crypto/poly1305-armv8.pl index cbc980fb02e3..22c9069c0650 100644 --- a/arch/arm64/crypto/poly1305-armv8.pl +++ b/arch/arm64/crypto/poly1305-armv8.pl @@ -473,7 +473,8 @@ poly1305_blocks_neon: subs $len,$len,#64 ldp x9,x13,[$inp,#48] add $in2,$inp,#96 - adr $zeros,.Lzeros + adrp $zeros,.Lzeros + add $zeros,$zeros,#:lo12:.Lzeros lsl $padbit,$padbit,#24 add x15,$ctx,#48 @@ -885,10 +886,13 @@ poly1305_blocks_neon: ret .size poly1305_blocks_neon,.-poly1305_blocks_neon +.pushsection .rodata .align 5 .Lzeros: .long 0,0,0,0,0,0,0,0 .asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm" +.popsection + .align 2 #if !defined(__KERNEL__) && !defined(_WIN64) .comm OPENSSL_armcap_P,4,4 From 8caa061cfdf65bca9a3ceb4ce2b2b8e5fb98c1e8 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 8 Aug 2024 16:05:26 -0600 Subject: [PATCH 28/96] crypto: qat - Use static_assert() to check struct sizes Commit 140e4c85d540 ("crypto: qat - Avoid -Wflex-array-member-not-at-end warnings") introduced tagged `struct qat_alg_buf_list_hdr`. We want to ensure that when new members need to be added to the flexible structure, they are always included within this tagged struct. So, we use `static_assert()` to ensure that the memory layout for both the flexible structure and the tagged struct is the same after any changes. Signed-off-by: Gustavo A. R. 
Silva Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_bl.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h index 85bc32a9ec0e..3f5b79015400 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.h +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h @@ -23,6 +23,8 @@ struct qat_alg_buf_list { ); struct qat_alg_buf buffers[]; } __packed; +static_assert(offsetof(struct qat_alg_buf_list, buffers) == sizeof(struct qat_alg_buf_list_hdr), + "struct member likely outside of __struct_group()"); struct qat_alg_fixed_buf_list { struct qat_alg_buf_list_hdr sgl_hdr; From f2f853e7ae4cb0aaf758fa30c07689a8d82680bc Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 8 Aug 2024 16:09:06 -0600 Subject: [PATCH 29/96] crypto: nx - Use static_assert() to check struct sizes Commit 1e6b251ce175 ("crypto: nx - Avoid -Wflex-array-member-not-at-end warning") introduced tagged `struct nx842_crypto_header_hdr`. We want to ensure that when new members need to be added to the flexible structure, they are always included within this tagged struct. So, we use `static_assert()` to ensure that the memory layout for both the flexible structure and the tagged struct is the same after any changes. Signed-off-by: Gustavo A. R. Silva Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index 25fa70b2112c..887d4ce3cb49 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -157,6 +157,7 @@ struct nx842_crypto_header_group { } __packed; struct nx842_crypto_header { + /* New members MUST be added within the struct_group() macro below. 
*/ struct_group_tagged(nx842_crypto_header_hdr, hdr, __be16 magic; /* NX842_CRYPTO_MAGIC */ __be16 ignore; /* decompressed end bytes to ignore */ @@ -164,6 +165,8 @@ struct nx842_crypto_header { ); struct nx842_crypto_header_group group[]; } __packed; +static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr), + "struct member likely outside of struct_group_tagged()"); #define NX842_CRYPTO_GROUP_MAX (0x20) From 9a22b2812393d93d84358a760c347c21939029a6 Mon Sep 17 00:00:00 2001 From: VanGiang Nguyen Date: Fri, 9 Aug 2024 06:21:42 +0000 Subject: [PATCH 30/96] padata: use integer wrap around to prevent deadlock on seq_nr overflow When submitting more than 2^32 padata objects to padata_do_serial, the current sorting implementation incorrectly sorts padata objects with overflowed seq_nr, causing them to be placed before existing objects in the reorder list. This leads to a deadlock in the serialization process as padata_find_next cannot match padata->seq_nr and pd->processed because the padata instance with overflowed seq_nr will be selected next. To fix this, we use an unsigned integer wrap around to correctly sort padata objects in scenarios with integer overflow. Fixes: bfde23ce200e ("padata: unbind parallel jobs from specific CPUs") Cc: Co-developed-by: Christian Gafert Signed-off-by: Christian Gafert Co-developed-by: Max Ferger Signed-off-by: Max Ferger Signed-off-by: Van Giang Nguyen Acked-by: Daniel Jordan Signed-off-by: Herbert Xu --- kernel/padata.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/padata.c b/kernel/padata.c index 53f4bc912712..222bccd0c96b 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -404,7 +404,8 @@ void padata_do_serial(struct padata_priv *padata) /* Sort in ascending order of sequence number. 
*/ list_for_each_prev(pos, &reorder->list) { cur = list_entry(pos, struct padata_priv, list); - if (cur->seq_nr < padata->seq_nr) + /* Compare by difference to consider integer wrap around */ + if ((signed int)(cur->seq_nr - padata->seq_nr) < 0) break; } list_add(&padata->list, pos); From f235bc11cc95fcd5847e8249d4c1c9ae5979701c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 9 Aug 2024 16:11:49 -0700 Subject: [PATCH 31/96] crypto: arm/aes-neonbs - go back to using aes-arm directly In aes-neonbs, instead of going through the crypto API for the parts that the bit-sliced AES code doesn't handle, namely AES-CBC encryption and single-block AES, just call the ARM scalar AES cipher directly. This basically goes back to the original approach that was used before commit b56f5cbc7e08 ("crypto: arm/aes-neonbs - resolve fallback cipher at runtime"). Calling the ARM scalar AES cipher directly is faster, simpler, and avoids any chance of bugs specific to the use of fallback ciphers such as module loading deadlocks which have happened twice. The deadlocks turned out to be fixable in other ways, but there's no need to rely on anything so fragile in the first place. The rationale for the above-mentioned commit was to allow people to choose to use a time-invariant AES implementation for the fallback cipher. There are a couple problems with that rationale, though: - In practice the ARM scalar AES cipher (aes-arm) was used anyway, since it has a higher priority than aes-fixed-time. Users *could* go out of their way to disable or blacklist aes-arm, or to lower its priority using NETLINK_CRYPTO, but very few users customize the crypto API to this extent. Systems with the ARMv8 Crypto Extensions used aes-ce, but the bit-sliced algorithms are irrelevant on such systems anyway. - Since commit 913a3aa07d16 ("crypto: arm/aes - add some hardening against cache-timing attacks"), the ARM scalar AES cipher is partially hardened against cache-timing attacks. 
It actually works like aes-fixed-time, in that it disables interrupts and prefetches its lookup table. It does use a larger table than aes-fixed-time, but even so, it is not clear that aes-fixed-time is meaningfully more time-invariant than aes-arm. And of course, the real solution for time-invariant AES is to use a CPU that supports AES instructions. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/arm/crypto/Kconfig | 14 +++- arch/arm/crypto/aes-cipher-glue.c | 5 +- arch/arm/crypto/aes-cipher.h | 13 +++ arch/arm/crypto/aes-neonbs-glue.c | 129 +++++++++--------------------- 4 files changed, 66 insertions(+), 95 deletions(-) create mode 100644 arch/arm/crypto/aes-cipher.h diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index 847b7a003356..5ff49a5e9afc 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -166,10 +166,9 @@ config CRYPTO_AES_ARM config CRYPTO_AES_ARM_BS tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)" depends on KERNEL_MODE_NEON + select CRYPTO_AES_ARM select CRYPTO_SKCIPHER select CRYPTO_LIB_AES - select CRYPTO_AES - select CRYPTO_CBC select CRYPTO_SIMD help Length-preserving ciphers: AES cipher algorithms (FIPS-197) @@ -183,8 +182,15 @@ config CRYPTO_AES_ARM_BS Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode and for XTS mode encryption, CBC and XTS mode decryption speedup is around 25%. (CBC encryption speed is not affected by this driver.) - This implementation does not rely on any lookup tables so it is - believed to be invulnerable to cache timing attacks. + + The bit sliced AES code does not use lookup tables, so it is believed + to be invulnerable to cache timing attacks. 
However, since the bit + sliced AES code cannot process single blocks efficiently, in certain + cases table-based code with some countermeasures against cache timing + attacks will still be used as a fallback method; specifically CBC + encryption (not CBC decryption), the encryption of XTS tweaks, XTS + ciphertext stealing when the message isn't a multiple of 16 bytes, and + CTR when invoked in a context in which NEON instructions are unusable. config CRYPTO_AES_ARM_CE tristate "Ciphers: AES, modes: ECB/CBC/CTS/CTR/XTS (ARMv8 Crypto Extensions)" diff --git a/arch/arm/crypto/aes-cipher-glue.c b/arch/arm/crypto/aes-cipher-glue.c index 6dfaef2d8f91..29efb7833960 100644 --- a/arch/arm/crypto/aes-cipher-glue.c +++ b/arch/arm/crypto/aes-cipher-glue.c @@ -9,9 +9,10 @@ #include #include #include +#include "aes-cipher.h" -asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out); -asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out); +EXPORT_SYMBOL_GPL(__aes_arm_encrypt); +EXPORT_SYMBOL_GPL(__aes_arm_decrypt); static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { diff --git a/arch/arm/crypto/aes-cipher.h b/arch/arm/crypto/aes-cipher.h new file mode 100644 index 000000000000..d5db2b87eb69 --- /dev/null +++ b/arch/arm/crypto/aes-cipher.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef ARM_CRYPTO_AES_CIPHER_H +#define ARM_CRYPTO_AES_CIPHER_H + +#include +#include + +asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds, + const u8 *in, u8 *out); +asmlinkage void __aes_arm_decrypt(const u32 rk[], int rounds, + const u8 *in, u8 *out); + +#endif /* ARM_CRYPTO_AES_CIPHER_H */ diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index 201eb35dde37..fd04f855b2f5 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -9,24 +9,22 @@ #include #include #include -#include #include #include #include #include #include +#include 
"aes-cipher.h" MODULE_AUTHOR("Ard Biesheuvel "); MODULE_DESCRIPTION("Bit sliced AES using NEON instructions"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("ecb(aes)"); -MODULE_ALIAS_CRYPTO("cbc(aes)-all"); +MODULE_ALIAS_CRYPTO("cbc(aes)"); MODULE_ALIAS_CRYPTO("ctr(aes)"); MODULE_ALIAS_CRYPTO("xts(aes)"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); - asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds); asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], @@ -52,13 +50,13 @@ struct aesbs_ctx { struct aesbs_cbc_ctx { struct aesbs_ctx key; - struct crypto_skcipher *enc_tfm; + struct crypto_aes_ctx fallback; }; struct aesbs_xts_ctx { struct aesbs_ctx key; - struct crypto_cipher *cts_tfm; - struct crypto_cipher *tweak_tfm; + struct crypto_aes_ctx fallback; + struct crypto_aes_ctx tweak_key; }; struct aesbs_ctr_ctx { @@ -129,37 +127,49 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_aes_ctx rk; int err; - err = aes_expandkey(&rk, in_key, key_len); + err = aes_expandkey(&ctx->fallback, in_key, key_len); if (err) return err; ctx->key.rounds = 6 + key_len / 4; kernel_neon_begin(); - aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds); + aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds); kernel_neon_end(); - memzero_explicit(&rk, sizeof(rk)); - return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len); + return 0; } static int cbc_encrypt(struct skcipher_request *req) { - struct skcipher_request *subreq = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); + const struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; - skcipher_request_set_tfm(subreq, ctx->enc_tfm); - skcipher_request_set_callback(subreq, - skcipher_request_flags(req), - 
NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); + err = skcipher_walk_virt(&walk, req, false); - return crypto_skcipher_encrypt(subreq); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + u8 *prev = walk.iv; + + do { + crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE); + __aes_arm_encrypt(ctx->fallback.key_enc, + ctx->key.rounds, dst, dst); + prev = dst; + src += AES_BLOCK_SIZE; + dst += AES_BLOCK_SIZE; + nbytes -= AES_BLOCK_SIZE; + } while (nbytes >= AES_BLOCK_SIZE); + memcpy(walk.iv, prev, AES_BLOCK_SIZE); + err = skcipher_walk_done(&walk, nbytes); + } + return err; } static int cbc_decrypt(struct skcipher_request *req) @@ -190,30 +200,6 @@ static int cbc_decrypt(struct skcipher_request *req) return err; } -static int cbc_init(struct crypto_skcipher *tfm) -{ - struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - unsigned int reqsize; - - ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->enc_tfm)) - return PTR_ERR(ctx->enc_tfm); - - reqsize = sizeof(struct skcipher_request); - reqsize += crypto_skcipher_reqsize(ctx->enc_tfm); - crypto_skcipher_set_reqsize(tfm, reqsize); - - return 0; -} - -static void cbc_exit(struct crypto_skcipher *tfm) -{ - struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_skcipher(ctx->enc_tfm); -} - static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { @@ -271,16 +257,8 @@ static int ctr_encrypt(struct skcipher_request *req) static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) { struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); - unsigned long flags; - /* - * Temporarily disable interrupts to avoid races where - * cachelines are evicted when the CPU is interrupted - * to do something else. 
- */ - local_irq_save(flags); - aes_encrypt(&ctx->fallback, dst, src); - local_irq_restore(flags); + __aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst); } static int ctr_encrypt_sync(struct skcipher_request *req) @@ -302,45 +280,23 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key, return err; key_len /= 2; - err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len); + err = aes_expandkey(&ctx->fallback, in_key, key_len); if (err) return err; - err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len); + err = aes_expandkey(&ctx->tweak_key, in_key + key_len, key_len); if (err) return err; return aesbs_setkey(tfm, in_key, key_len); } -static int xts_init(struct crypto_skcipher *tfm) -{ - struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0); - if (IS_ERR(ctx->cts_tfm)) - return PTR_ERR(ctx->cts_tfm); - - ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0); - if (IS_ERR(ctx->tweak_tfm)) - crypto_free_cipher(ctx->cts_tfm); - - return PTR_ERR_OR_ZERO(ctx->tweak_tfm); -} - -static void xts_exit(struct crypto_skcipher *tfm) -{ - struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_cipher(ctx->tweak_tfm); - crypto_free_cipher(ctx->cts_tfm); -} - static int __xts_crypt(struct skcipher_request *req, bool encrypt, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[], int)) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); + const int rounds = ctx->key.rounds; int tail = req->cryptlen % AES_BLOCK_SIZE; struct skcipher_request subreq; u8 buf[2 * AES_BLOCK_SIZE]; @@ -364,7 +320,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, if (err) return err; - crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv); + __aes_arm_encrypt(ctx->tweak_key.key_enc, rounds, walk.iv, walk.iv); while (walk.nbytes >= AES_BLOCK_SIZE) { unsigned int blocks = 
walk.nbytes / AES_BLOCK_SIZE; @@ -378,7 +334,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, kernel_neon_begin(); fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk, - ctx->key.rounds, blocks, walk.iv, reorder_last_tweak); + rounds, blocks, walk.iv, reorder_last_tweak); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes - blocks * AES_BLOCK_SIZE); @@ -396,9 +352,9 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, crypto_xor(buf, req->iv, AES_BLOCK_SIZE); if (encrypt) - crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf); + __aes_arm_encrypt(ctx->fallback.key_enc, rounds, buf, buf); else - crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf); + __aes_arm_decrypt(ctx->fallback.key_dec, rounds, buf, buf); crypto_xor(buf, req->iv, AES_BLOCK_SIZE); @@ -439,8 +395,7 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx), .base.cra_module = THIS_MODULE, - .base.cra_flags = CRYPTO_ALG_INTERNAL | - CRYPTO_ALG_NEED_FALLBACK, + .base.cra_flags = CRYPTO_ALG_INTERNAL, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -449,8 +404,6 @@ static struct skcipher_alg aes_algs[] = { { .setkey = aesbs_cbc_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - .init = cbc_init, - .exit = cbc_exit, }, { .base.cra_name = "__ctr(aes)", .base.cra_driver_name = "__ctr-aes-neonbs", @@ -500,8 +453,6 @@ static struct skcipher_alg aes_algs[] = { { .setkey = aesbs_xts_setkey, .encrypt = xts_encrypt, .decrypt = xts_decrypt, - .init = xts_init, - .exit = xts_exit, } }; static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)]; From fca5cb4dd2b4a9423cb6d112cc71c33899955a1f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 10 Aug 2024 14:20:55 +0800 Subject: [PATCH 32/96] Revert "lib/mpi: Extend the MPI library" This partially reverts commit a8ea8bdd9df92a0e5db5b43900abb7a288b8a53e. 
Most of it is no longer needed since sm2 has been removed. However, the following functions have been kept as they have developed other uses: mpi_copy mpi_mod mpi_test_bit mpi_set_bit mpi_rshift mpi_add mpi_sub mpi_addm mpi_subm mpi_mul mpi_mulm mpi_tdiv_r mpi_fdiv_r Signed-off-by: Herbert Xu --- include/linux/mpi.h | 65 ------- lib/crypto/mpi/Makefile | 1 - lib/crypto/mpi/mpi-add.c | 51 ------ lib/crypto/mpi/mpi-bit.c | 143 --------------- lib/crypto/mpi/mpi-cmp.c | 46 +---- lib/crypto/mpi/mpi-div.c | 29 --- lib/crypto/mpi/mpi-internal.h | 10 - lib/crypto/mpi/mpi-inv.c | 143 --------------- lib/crypto/mpi/mpi-mod.c | 144 --------------- lib/crypto/mpi/mpicoder.c | 336 ---------------------------------- lib/crypto/mpi/mpih-mul.c | 25 --- lib/crypto/mpi/mpiutil.c | 182 ------------------ 12 files changed, 10 insertions(+), 1165 deletions(-) delete mode 100644 lib/crypto/mpi/mpi-inv.c diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 89b720893e12..e081428b91ef 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -40,87 +40,33 @@ struct gcry_mpi { typedef struct gcry_mpi *MPI; #define mpi_get_nlimbs(a) ((a)->nlimbs) -#define mpi_has_sign(a) ((a)->sign) /*-- mpiutil.c --*/ MPI mpi_alloc(unsigned nlimbs); -void mpi_clear(MPI a); void mpi_free(MPI a); int mpi_resize(MPI a, unsigned nlimbs); -static inline MPI mpi_new(unsigned int nbits) -{ - return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB); -} - MPI mpi_copy(MPI a); -MPI mpi_alloc_like(MPI a); -void mpi_snatch(MPI w, MPI u); -MPI mpi_set(MPI w, MPI u); -MPI mpi_set_ui(MPI w, unsigned long u); -MPI mpi_alloc_set_ui(unsigned long u); -void mpi_swap_cond(MPI a, MPI b, unsigned long swap); - -/* Constants used to return constant MPIs. See mpi_init if you - * want to add more constants. 
- */ -#define MPI_NUMBER_OF_CONSTANTS 6 -enum gcry_mpi_constants { - MPI_C_ZERO, - MPI_C_ONE, - MPI_C_TWO, - MPI_C_THREE, - MPI_C_FOUR, - MPI_C_EIGHT -}; - -MPI mpi_const(enum gcry_mpi_constants no); /*-- mpicoder.c --*/ - -/* Different formats of external big integer representation. */ -enum gcry_mpi_format { - GCRYMPI_FMT_NONE = 0, - GCRYMPI_FMT_STD = 1, /* Twos complement stored without length. */ - GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */ - GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */ - GCRYMPI_FMT_HEX = 4, /* Hex format. */ - GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */ - GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). */ -}; - MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes); MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread); -int mpi_fromstr(MPI val, const char *str); -MPI mpi_scanval(const char *string); MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len); void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, int *sign); int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes, int *sign); -int mpi_print(enum gcry_mpi_format format, unsigned char *buffer, - size_t buflen, size_t *nwritten, MPI a); /*-- mpi-mod.c --*/ void mpi_mod(MPI rem, MPI dividend, MPI divisor); -/* Context used with Barrett reduction. 
*/ -struct barrett_ctx_s; -typedef struct barrett_ctx_s *mpi_barrett_t; - -mpi_barrett_t mpi_barrett_init(MPI m, int copy); -void mpi_barrett_free(mpi_barrett_t ctx); -void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx); -void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx); - /*-- mpi-pow.c --*/ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); /*-- mpi-cmp.c --*/ int mpi_cmp_ui(MPI u, ulong v); int mpi_cmp(MPI u, MPI v); -int mpi_cmpabs(MPI u, MPI v); /*-- mpi-sub-ui.c --*/ int mpi_sub_ui(MPI w, MPI u, unsigned long vval); @@ -130,16 +76,9 @@ void mpi_normalize(MPI a); unsigned mpi_get_nbits(MPI a); int mpi_test_bit(MPI a, unsigned int n); void mpi_set_bit(MPI a, unsigned int n); -void mpi_set_highbit(MPI a, unsigned int n); -void mpi_clear_highbit(MPI a, unsigned int n); -void mpi_clear_bit(MPI a, unsigned int n); -void mpi_rshift_limbs(MPI a, unsigned int count); void mpi_rshift(MPI x, MPI a, unsigned int n); -void mpi_lshift_limbs(MPI a, unsigned int count); -void mpi_lshift(MPI x, MPI a, unsigned int n); /*-- mpi-add.c --*/ -void mpi_add_ui(MPI w, MPI u, unsigned long v); void mpi_add(MPI w, MPI u, MPI v); void mpi_sub(MPI w, MPI u, MPI v); void mpi_addm(MPI w, MPI u, MPI v, MPI m); @@ -152,10 +91,6 @@ void mpi_mulm(MPI w, MPI u, MPI v, MPI m); /*-- mpi-div.c --*/ void mpi_tdiv_r(MPI rem, MPI num, MPI den); void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor); -void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor); - -/*-- mpi-inv.c --*/ -int mpi_invm(MPI x, MPI a, MPI n); /* inline functions */ diff --git a/lib/crypto/mpi/Makefile b/lib/crypto/mpi/Makefile index 477debd7ed50..9ad84079025a 100644 --- a/lib/crypto/mpi/Makefile +++ b/lib/crypto/mpi/Makefile @@ -19,7 +19,6 @@ mpi-y = \ mpi-cmp.o \ mpi-sub-ui.o \ mpi-div.o \ - mpi-inv.o \ mpi-mod.o \ mpi-mul.o \ mpih-cmp.o \ diff --git a/lib/crypto/mpi/mpi-add.c b/lib/crypto/mpi/mpi-add.c index 9056fc5167fc..b47c8c35f5fe 100644 --- a/lib/crypto/mpi/mpi-add.c +++ b/lib/crypto/mpi/mpi-add.c @@ -13,57 +13,6 
@@ #include "mpi-internal.h" -/**************** - * Add the unsigned integer V to the mpi-integer U and store the - * result in W. U and V may be the same. - */ -void mpi_add_ui(MPI w, MPI u, unsigned long v) -{ - mpi_ptr_t wp, up; - mpi_size_t usize, wsize; - int usign, wsign; - - usize = u->nlimbs; - usign = u->sign; - wsign = 0; - - /* If not space for W (and possible carry), increase space. */ - wsize = usize + 1; - if (w->alloced < wsize) - mpi_resize(w, wsize); - - /* These must be after realloc (U may be the same as W). */ - up = u->d; - wp = w->d; - - if (!usize) { /* simple */ - wp[0] = v; - wsize = v ? 1:0; - } else if (!usign) { /* mpi is not negative */ - mpi_limb_t cy; - cy = mpihelp_add_1(wp, up, usize, v); - wp[usize] = cy; - wsize = usize + cy; - } else { - /* The signs are different. Need exact comparison to determine - * which operand to subtract from which. - */ - if (usize == 1 && up[0] < v) { - wp[0] = v - up[0]; - wsize = 1; - } else { - mpihelp_sub_1(wp, up, usize, v); - /* Size can decrease with at most one limb. */ - wsize = usize - (wp[usize-1] == 0); - wsign = 1; - } - } - - w->nlimbs = wsize; - w->sign = wsign; -} - - void mpi_add(MPI w, MPI u, MPI v) { mpi_ptr_t wp, up, vp; diff --git a/lib/crypto/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c index e08fc202ea5c..c29b85362664 100644 --- a/lib/crypto/mpi/mpi-bit.c +++ b/lib/crypto/mpi/mpi-bit.c @@ -32,7 +32,6 @@ void mpi_normalize(MPI a) for (; a->nlimbs && !a->d[a->nlimbs - 1]; a->nlimbs--) ; } -EXPORT_SYMBOL_GPL(mpi_normalize); /**************** * Return the number of bits in A. 
@@ -93,85 +92,6 @@ void mpi_set_bit(MPI a, unsigned int n) a->d[limbno] |= (A_LIMB_1<= a->nlimbs) { - for (i = a->nlimbs; i < a->alloced; i++) - a->d[i] = 0; - mpi_resize(a, limbno+1); - a->nlimbs = limbno+1; - } - a->d[limbno] |= (A_LIMB_1<d[limbno] &= ~(A_LIMB_1 << bitno); - a->nlimbs = limbno+1; -} -EXPORT_SYMBOL_GPL(mpi_set_highbit); - -/**************** - * clear bit N of A and all bits above - */ -void mpi_clear_highbit(MPI a, unsigned int n) -{ - unsigned int limbno, bitno; - - limbno = n / BITS_PER_MPI_LIMB; - bitno = n % BITS_PER_MPI_LIMB; - - if (limbno >= a->nlimbs) - return; /* not allocated, therefore no need to clear bits :-) */ - - for ( ; bitno < BITS_PER_MPI_LIMB; bitno++) - a->d[limbno] &= ~(A_LIMB_1 << bitno); - a->nlimbs = limbno+1; -} - -/**************** - * Clear bit N of A. - */ -void mpi_clear_bit(MPI a, unsigned int n) -{ - unsigned int limbno, bitno; - - limbno = n / BITS_PER_MPI_LIMB; - bitno = n % BITS_PER_MPI_LIMB; - - if (limbno >= a->nlimbs) - return; /* Don't need to clear this bit, it's far too left. */ - a->d[limbno] &= ~(A_LIMB_1 << bitno); -} -EXPORT_SYMBOL_GPL(mpi_clear_bit); - - -/**************** - * Shift A by COUNT limbs to the right - * This is used only within the MPI library - */ -void mpi_rshift_limbs(MPI a, unsigned int count) -{ - mpi_ptr_t ap = a->d; - mpi_size_t n = a->nlimbs; - unsigned int i; - - if (count >= n) { - a->nlimbs = 0; - return; - } - - for (i = 0; i < n - count; i++) - ap[i] = ap[i+count]; - ap[i] = 0; - a->nlimbs -= count; -} - /* * Shift A by N bits to the right. 
*/ @@ -241,66 +161,3 @@ void mpi_rshift(MPI x, MPI a, unsigned int n) MPN_NORMALIZE(x->d, x->nlimbs); } EXPORT_SYMBOL_GPL(mpi_rshift); - -/**************** - * Shift A by COUNT limbs to the left - * This is used only within the MPI library - */ -void mpi_lshift_limbs(MPI a, unsigned int count) -{ - mpi_ptr_t ap; - int n = a->nlimbs; - int i; - - if (!count || !n) - return; - - RESIZE_IF_NEEDED(a, n+count); - - ap = a->d; - for (i = n-1; i >= 0; i--) - ap[i+count] = ap[i]; - for (i = 0; i < count; i++) - ap[i] = 0; - a->nlimbs += count; -} - -/* - * Shift A by N bits to the left. - */ -void mpi_lshift(MPI x, MPI a, unsigned int n) -{ - unsigned int nlimbs = (n/BITS_PER_MPI_LIMB); - unsigned int nbits = (n%BITS_PER_MPI_LIMB); - - if (x == a && !n) - return; /* In-place shift with an amount of zero. */ - - if (x != a) { - /* Copy A to X. */ - unsigned int alimbs = a->nlimbs; - int asign = a->sign; - mpi_ptr_t xp, ap; - - RESIZE_IF_NEEDED(x, alimbs+nlimbs+1); - xp = x->d; - ap = a->d; - MPN_COPY(xp, ap, alimbs); - x->nlimbs = alimbs; - x->flags = a->flags; - x->sign = asign; - } - - if (nlimbs && !nbits) { - /* Shift a full number of limbs. */ - mpi_lshift_limbs(x, nlimbs); - } else if (n) { - /* We use a very dump approach: Shift left by the number of - * limbs plus one and than fix it up by an rshift. - */ - mpi_lshift_limbs(x, nlimbs+1); - mpi_rshift(x, x, BITS_PER_MPI_LIMB - nbits); - } - - MPN_NORMALIZE(x->d, x->nlimbs); -} diff --git a/lib/crypto/mpi/mpi-cmp.c b/lib/crypto/mpi/mpi-cmp.c index 0835b6213235..ceaebe181cd7 100644 --- a/lib/crypto/mpi/mpi-cmp.c +++ b/lib/crypto/mpi/mpi-cmp.c @@ -45,54 +45,28 @@ int mpi_cmp_ui(MPI u, unsigned long v) } EXPORT_SYMBOL_GPL(mpi_cmp_ui); -static int do_mpi_cmp(MPI u, MPI v, int absmode) +int mpi_cmp(MPI u, MPI v) { - mpi_size_t usize; - mpi_size_t vsize; - int usign; - int vsign; + mpi_size_t usize, vsize; int cmp; mpi_normalize(u); mpi_normalize(v); - usize = u->nlimbs; vsize = v->nlimbs; - usign = absmode ? 
0 : u->sign; - vsign = absmode ? 0 : v->sign; - - /* Compare sign bits. */ - - if (!usign && vsign) + if (!u->sign && v->sign) return 1; - if (usign && !vsign) + if (u->sign && !v->sign) return -1; - - /* U and V are either both positive or both negative. */ - - if (usize != vsize && !usign && !vsign) + if (usize != vsize && !u->sign && !v->sign) return usize - vsize; - if (usize != vsize && usign && vsign) - return vsize + usize; + if (usize != vsize && u->sign && v->sign) + return vsize - usize; if (!usize) return 0; cmp = mpihelp_cmp(u->d, v->d, usize); - if (!cmp) - return 0; - if ((cmp < 0?1:0) == (usign?1:0)) - return 1; - - return -1; -} - -int mpi_cmp(MPI u, MPI v) -{ - return do_mpi_cmp(u, v, 0); + if (u->sign) + return -cmp; + return cmp; } EXPORT_SYMBOL_GPL(mpi_cmp); - -int mpi_cmpabs(MPI u, MPI v) -{ - return do_mpi_cmp(u, v, 1); -} -EXPORT_SYMBOL_GPL(mpi_cmpabs); diff --git a/lib/crypto/mpi/mpi-div.c b/lib/crypto/mpi/mpi-div.c index 45beab8b9e9e..2ff0ebd74fd7 100644 --- a/lib/crypto/mpi/mpi-div.c +++ b/lib/crypto/mpi/mpi-div.c @@ -15,7 +15,6 @@ #include "longlong.h" void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den); -void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor); void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor) { @@ -40,34 +39,6 @@ void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor) mpi_free(temp_divisor); } -void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor) -{ - MPI tmp = mpi_alloc(mpi_get_nlimbs(quot)); - mpi_fdiv_qr(quot, tmp, dividend, divisor); - mpi_free(tmp); -} - -void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor) -{ - int divisor_sign = divisor->sign; - MPI temp_divisor = NULL; - - if (quot == divisor || rem == divisor) { - temp_divisor = mpi_copy(divisor); - divisor = temp_divisor; - } - - mpi_tdiv_qr(quot, rem, dividend, divisor); - - if ((divisor_sign ^ dividend->sign) && rem->nlimbs) { - mpi_sub_ui(quot, quot, 1); - mpi_add(rem, rem, divisor); - } - - if (temp_divisor) - mpi_free(temp_divisor); -} - 
/* If den == quot, den needs temporary storage. * If den == rem, den needs temporary storage. * If num == quot, num needs temporary storage. diff --git a/lib/crypto/mpi/mpi-internal.h b/lib/crypto/mpi/mpi-internal.h index 554002182db1..b6fbb43afbc8 100644 --- a/lib/crypto/mpi/mpi-internal.h +++ b/lib/crypto/mpi/mpi-internal.h @@ -66,14 +66,6 @@ typedef int mpi_size_t; /* (must be a signed type) */ (d)[_i] = (s)[_i]; \ } while (0) -#define MPN_COPY_INCR(d, s, n) \ - do { \ - mpi_size_t _i; \ - for (_i = 0; _i < (n); _i++) \ - (d)[_i] = (s)[_i]; \ - } while (0) - - #define MPN_COPY_DECR(d, s, n) \ do { \ mpi_size_t _i; \ @@ -181,8 +173,6 @@ int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size); void mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace); -void mpihelp_mul_n(mpi_ptr_t prodp, - mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size); int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, diff --git a/lib/crypto/mpi/mpi-inv.c b/lib/crypto/mpi/mpi-inv.c deleted file mode 100644 index 61e37d18f793..000000000000 --- a/lib/crypto/mpi/mpi-inv.c +++ /dev/null @@ -1,143 +0,0 @@ -/* mpi-inv.c - MPI functions - * Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc. - * - * This file is part of Libgcrypt. - * - * Libgcrypt is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of - * the License, or (at your option) any later version. - * - * Libgcrypt is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, see . - */ - -#include "mpi-internal.h" - -/**************** - * Calculate the multiplicative inverse X of A mod N - * That is: Find the solution x for - * 1 = (a*x) mod n - */ -int mpi_invm(MPI x, MPI a, MPI n) -{ - /* Extended Euclid's algorithm (See TAOCP Vol II, 4.5.2, Alg X) - * modified according to Michael Penk's solution for Exercise 35 - * with further enhancement - */ - MPI u, v, u1, u2 = NULL, u3, v1, v2 = NULL, v3, t1, t2 = NULL, t3; - unsigned int k; - int sign; - int odd; - - if (!mpi_cmp_ui(a, 0)) - return 0; /* Inverse does not exists. */ - if (!mpi_cmp_ui(n, 1)) - return 0; /* Inverse does not exists. */ - - u = mpi_copy(a); - v = mpi_copy(n); - - for (k = 0; !mpi_test_bit(u, 0) && !mpi_test_bit(v, 0); k++) { - mpi_rshift(u, u, 1); - mpi_rshift(v, v, 1); - } - odd = mpi_test_bit(v, 0); - - u1 = mpi_alloc_set_ui(1); - if (!odd) - u2 = mpi_alloc_set_ui(0); - u3 = mpi_copy(u); - v1 = mpi_copy(v); - if (!odd) { - v2 = mpi_alloc(mpi_get_nlimbs(u)); - mpi_sub(v2, u1, u); /* U is used as const 1 */ - } - v3 = mpi_copy(v); - if (mpi_test_bit(u, 0)) { /* u is odd */ - t1 = mpi_alloc_set_ui(0); - if (!odd) { - t2 = mpi_alloc_set_ui(1); - t2->sign = 1; - } - t3 = mpi_copy(v); - t3->sign = !t3->sign; - goto Y4; - } else { - t1 = mpi_alloc_set_ui(1); - if (!odd) - t2 = mpi_alloc_set_ui(0); - t3 = mpi_copy(u); - } - - do { - do { - if (!odd) { - if (mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0)) { - /* one is odd */ - mpi_add(t1, t1, v); - mpi_sub(t2, t2, u); - } - mpi_rshift(t1, t1, 1); - mpi_rshift(t2, t2, 1); - mpi_rshift(t3, t3, 1); - } else { - if (mpi_test_bit(t1, 0)) - mpi_add(t1, t1, v); - mpi_rshift(t1, t1, 1); - mpi_rshift(t3, t3, 1); - } -Y4: - ; - } while (!mpi_test_bit(t3, 0)); /* while t3 is even */ - - if (!t3->sign) { - mpi_set(u1, t1); - if (!odd) - mpi_set(u2, t2); - mpi_set(u3, t3); - } else { - mpi_sub(v1, v, t1); - sign = 
u->sign; u->sign = !u->sign; - if (!odd) - mpi_sub(v2, u, t2); - u->sign = sign; - sign = t3->sign; t3->sign = !t3->sign; - mpi_set(v3, t3); - t3->sign = sign; - } - mpi_sub(t1, u1, v1); - if (!odd) - mpi_sub(t2, u2, v2); - mpi_sub(t3, u3, v3); - if (t1->sign) { - mpi_add(t1, t1, v); - if (!odd) - mpi_sub(t2, t2, u); - } - } while (mpi_cmp_ui(t3, 0)); /* while t3 != 0 */ - /* mpi_lshift( u3, k ); */ - mpi_set(x, u1); - - mpi_free(u1); - mpi_free(v1); - mpi_free(t1); - if (!odd) { - mpi_free(u2); - mpi_free(v2); - mpi_free(t2); - } - mpi_free(u3); - mpi_free(v3); - mpi_free(t3); - - mpi_free(u); - mpi_free(v); - return 1; -} -EXPORT_SYMBOL_GPL(mpi_invm); diff --git a/lib/crypto/mpi/mpi-mod.c b/lib/crypto/mpi/mpi-mod.c index 54fcc01564d9..691bbdc52fc6 100644 --- a/lib/crypto/mpi/mpi-mod.c +++ b/lib/crypto/mpi/mpi-mod.c @@ -5,153 +5,9 @@ * This file is part of Libgcrypt. */ - #include "mpi-internal.h" -#include "longlong.h" - -/* Context used with Barrett reduction. */ -struct barrett_ctx_s { - MPI m; /* The modulus - may not be modified. */ - int m_copied; /* If true, M needs to be released. */ - int k; - MPI y; - MPI r1; /* Helper MPI. */ - MPI r2; /* Helper MPI. */ - MPI r3; /* Helper MPI allocated on demand. */ -}; - - void mpi_mod(MPI rem, MPI dividend, MPI divisor) { mpi_fdiv_r(rem, dividend, divisor); } - -/* This function returns a new context for Barrett based operations on - * the modulus M. This context needs to be released using - * _gcry_mpi_barrett_free. If COPY is true M will be transferred to - * the context and the user may change M. If COPY is false, M may not - * be changed until gcry_mpi_barrett_free has been called. 
- */ -mpi_barrett_t mpi_barrett_init(MPI m, int copy) -{ - mpi_barrett_t ctx; - MPI tmp; - - mpi_normalize(m); - ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return NULL; - - if (copy) { - ctx->m = mpi_copy(m); - ctx->m_copied = 1; - } else - ctx->m = m; - - ctx->k = mpi_get_nlimbs(m); - tmp = mpi_alloc(ctx->k + 1); - - /* Barrett precalculation: y = floor(b^(2k) / m). */ - mpi_set_ui(tmp, 1); - mpi_lshift_limbs(tmp, 2 * ctx->k); - mpi_fdiv_q(tmp, tmp, m); - - ctx->y = tmp; - ctx->r1 = mpi_alloc(2 * ctx->k + 1); - ctx->r2 = mpi_alloc(2 * ctx->k + 1); - - return ctx; -} - -void mpi_barrett_free(mpi_barrett_t ctx) -{ - if (ctx) { - mpi_free(ctx->y); - mpi_free(ctx->r1); - mpi_free(ctx->r2); - if (ctx->r3) - mpi_free(ctx->r3); - if (ctx->m_copied) - mpi_free(ctx->m); - kfree(ctx); - } -} - - -/* R = X mod M - * - * Using Barrett reduction. Before using this function - * _gcry_mpi_barrett_init must have been called to do the - * precalculations. CTX is the context created by this precalculation - * and also conveys M. If the Barret reduction could no be done a - * straightforward reduction method is used. - * - * We assume that these conditions are met: - * Input: x =(x_2k-1 ...x_0)_b - * m =(m_k-1 ....m_0)_b with m_k-1 != 0 - * Output: r = x mod m - */ -void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx) -{ - MPI m = ctx->m; - int k = ctx->k; - MPI y = ctx->y; - MPI r1 = ctx->r1; - MPI r2 = ctx->r2; - int sign; - - mpi_normalize(x); - if (mpi_get_nlimbs(x) > 2*k) { - mpi_mod(r, x, m); - return; - } - - sign = x->sign; - x->sign = 0; - - /* 1. q1 = floor( x / b^k-1) - * q2 = q1 * y - * q3 = floor( q2 / b^k+1 ) - * Actually, we don't need qx, we can work direct on r2 - */ - mpi_set(r2, x); - mpi_rshift_limbs(r2, k-1); - mpi_mul(r2, r2, y); - mpi_rshift_limbs(r2, k+1); - - /* 2. r1 = x mod b^k+1 - * r2 = q3 * m mod b^k+1 - * r = r1 - r2 - * 3. if r < 0 then r = r + b^k+1 - */ - mpi_set(r1, x); - if (r1->nlimbs > k+1) /* Quick modulo operation. 
*/ - r1->nlimbs = k+1; - mpi_mul(r2, r2, m); - if (r2->nlimbs > k+1) /* Quick modulo operation. */ - r2->nlimbs = k+1; - mpi_sub(r, r1, r2); - - if (mpi_has_sign(r)) { - if (!ctx->r3) { - ctx->r3 = mpi_alloc(k + 2); - mpi_set_ui(ctx->r3, 1); - mpi_lshift_limbs(ctx->r3, k + 1); - } - mpi_add(r, r, ctx->r3); - } - - /* 4. while r >= m do r = r - m */ - while (mpi_cmp(r, m) >= 0) - mpi_sub(r, r, m); - - x->sign = sign; -} - - -void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx) -{ - mpi_mul(w, u, v); - mpi_mod_barrett(w, w, ctx); -} diff --git a/lib/crypto/mpi/mpicoder.c b/lib/crypto/mpi/mpicoder.c index 3cb6bd148fa9..dde01030807d 100644 --- a/lib/crypto/mpi/mpicoder.c +++ b/lib/crypto/mpi/mpicoder.c @@ -25,7 +25,6 @@ #include #include "mpi-internal.h" -#define MAX_EXTERN_SCAN_BYTES (16*1024*1024) #define MAX_EXTERN_MPI_BITS 16384 /** @@ -110,112 +109,6 @@ MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread) } EXPORT_SYMBOL_GPL(mpi_read_from_buffer); -/**************** - * Fill the mpi VAL from the hex string in STR. - */ -int mpi_fromstr(MPI val, const char *str) -{ - int sign = 0; - int prepend_zero = 0; - int i, j, c, c1, c2; - unsigned int nbits, nbytes, nlimbs; - mpi_limb_t a; - - if (*str == '-') { - sign = 1; - str++; - } - - /* Skip optional hex prefix. 
*/ - if (*str == '0' && str[1] == 'x') - str += 2; - - nbits = strlen(str); - if (nbits > MAX_EXTERN_SCAN_BYTES) { - mpi_clear(val); - return -EINVAL; - } - nbits *= 4; - if ((nbits % 8)) - prepend_zero = 1; - - nbytes = (nbits+7) / 8; - nlimbs = (nbytes+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB; - - if (val->alloced < nlimbs) - mpi_resize(val, nlimbs); - - i = BYTES_PER_MPI_LIMB - (nbytes % BYTES_PER_MPI_LIMB); - i %= BYTES_PER_MPI_LIMB; - j = val->nlimbs = nlimbs; - val->sign = sign; - for (; j > 0; j--) { - a = 0; - for (; i < BYTES_PER_MPI_LIMB; i++) { - if (prepend_zero) { - c1 = '0'; - prepend_zero = 0; - } else - c1 = *str++; - - if (!c1) { - mpi_clear(val); - return -EINVAL; - } - c2 = *str++; - if (!c2) { - mpi_clear(val); - return -EINVAL; - } - if (c1 >= '0' && c1 <= '9') - c = c1 - '0'; - else if (c1 >= 'a' && c1 <= 'f') - c = c1 - 'a' + 10; - else if (c1 >= 'A' && c1 <= 'F') - c = c1 - 'A' + 10; - else { - mpi_clear(val); - return -EINVAL; - } - c <<= 4; - if (c2 >= '0' && c2 <= '9') - c |= c2 - '0'; - else if (c2 >= 'a' && c2 <= 'f') - c |= c2 - 'a' + 10; - else if (c2 >= 'A' && c2 <= 'F') - c |= c2 - 'A' + 10; - else { - mpi_clear(val); - return -EINVAL; - } - a <<= 8; - a |= c; - } - i = 0; - val->d[j-1] = a; - } - - return 0; -} -EXPORT_SYMBOL_GPL(mpi_fromstr); - -MPI mpi_scanval(const char *string) -{ - MPI a; - - a = mpi_alloc(0); - if (!a) - return NULL; - - if (mpi_fromstr(a, string)) { - mpi_free(a); - return NULL; - } - mpi_normalize(a); - return a; -} -EXPORT_SYMBOL_GPL(mpi_scanval); - static int count_lzeros(MPI a) { mpi_limb_t alimb; @@ -521,232 +414,3 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes) return val; } EXPORT_SYMBOL_GPL(mpi_read_raw_from_sgl); - -/* Perform a two's complement operation on buffer P of size N bytes. 
*/ -static void twocompl(unsigned char *p, unsigned int n) -{ - int i; - - for (i = n-1; i >= 0 && !p[i]; i--) - ; - if (i >= 0) { - if ((p[i] & 0x01)) - p[i] = (((p[i] ^ 0xfe) | 0x01) & 0xff); - else if ((p[i] & 0x02)) - p[i] = (((p[i] ^ 0xfc) | 0x02) & 0xfe); - else if ((p[i] & 0x04)) - p[i] = (((p[i] ^ 0xf8) | 0x04) & 0xfc); - else if ((p[i] & 0x08)) - p[i] = (((p[i] ^ 0xf0) | 0x08) & 0xf8); - else if ((p[i] & 0x10)) - p[i] = (((p[i] ^ 0xe0) | 0x10) & 0xf0); - else if ((p[i] & 0x20)) - p[i] = (((p[i] ^ 0xc0) | 0x20) & 0xe0); - else if ((p[i] & 0x40)) - p[i] = (((p[i] ^ 0x80) | 0x40) & 0xc0); - else - p[i] = 0x80; - - for (i--; i >= 0; i--) - p[i] ^= 0xff; - } -} - -int mpi_print(enum gcry_mpi_format format, unsigned char *buffer, - size_t buflen, size_t *nwritten, MPI a) -{ - unsigned int nbits = mpi_get_nbits(a); - size_t len; - size_t dummy_nwritten; - int negative; - - if (!nwritten) - nwritten = &dummy_nwritten; - - /* Libgcrypt does no always care to set clear the sign if the value - * is 0. For printing this is a bit of a surprise, in particular - * because if some of the formats don't support negative numbers but - * should be able to print a zero. Thus we need this extra test - * for a negative number. - */ - if (a->sign && mpi_cmp_ui(a, 0)) - negative = 1; - else - negative = 0; - - len = buflen; - *nwritten = 0; - if (format == GCRYMPI_FMT_STD) { - unsigned char *tmp; - int extra = 0; - unsigned int n; - - tmp = mpi_get_buffer(a, &n, NULL); - if (!tmp) - return -EINVAL; - - if (negative) { - twocompl(tmp, n); - if (!(*tmp & 0x80)) { - /* Need to extend the sign. */ - n++; - extra = 2; - } - } else if (n && (*tmp & 0x80)) { - /* Positive but the high bit of the returned buffer is set. - * Thus we need to print an extra leading 0x00 so that the - * output is interpreted as a positive number. - */ - n++; - extra = 1; - } - - if (buffer && n > len) { - /* The provided buffer is too short. 
*/ - kfree(tmp); - return -E2BIG; - } - if (buffer) { - unsigned char *s = buffer; - - if (extra == 1) - *s++ = 0; - else if (extra) - *s++ = 0xff; - memcpy(s, tmp, n-!!extra); - } - kfree(tmp); - *nwritten = n; - return 0; - } else if (format == GCRYMPI_FMT_USG) { - unsigned int n = (nbits + 7)/8; - - /* Note: We ignore the sign for this format. */ - /* FIXME: for performance reasons we should put this into - * mpi_aprint because we can then use the buffer directly. - */ - - if (buffer && n > len) - return -E2BIG; - if (buffer) { - unsigned char *tmp; - - tmp = mpi_get_buffer(a, &n, NULL); - if (!tmp) - return -EINVAL; - memcpy(buffer, tmp, n); - kfree(tmp); - } - *nwritten = n; - return 0; - } else if (format == GCRYMPI_FMT_PGP) { - unsigned int n = (nbits + 7)/8; - - /* The PGP format can only handle unsigned integers. */ - if (negative) - return -EINVAL; - - if (buffer && n+2 > len) - return -E2BIG; - - if (buffer) { - unsigned char *tmp; - unsigned char *s = buffer; - - s[0] = nbits >> 8; - s[1] = nbits; - - tmp = mpi_get_buffer(a, &n, NULL); - if (!tmp) - return -EINVAL; - memcpy(s+2, tmp, n); - kfree(tmp); - } - *nwritten = n+2; - return 0; - } else if (format == GCRYMPI_FMT_SSH) { - unsigned char *tmp; - int extra = 0; - unsigned int n; - - tmp = mpi_get_buffer(a, &n, NULL); - if (!tmp) - return -EINVAL; - - if (negative) { - twocompl(tmp, n); - if (!(*tmp & 0x80)) { - /* Need to extend the sign. 
*/ - n++; - extra = 2; - } - } else if (n && (*tmp & 0x80)) { - n++; - extra = 1; - } - - if (buffer && n+4 > len) { - kfree(tmp); - return -E2BIG; - } - - if (buffer) { - unsigned char *s = buffer; - - *s++ = n >> 24; - *s++ = n >> 16; - *s++ = n >> 8; - *s++ = n; - if (extra == 1) - *s++ = 0; - else if (extra) - *s++ = 0xff; - memcpy(s, tmp, n-!!extra); - } - kfree(tmp); - *nwritten = 4+n; - return 0; - } else if (format == GCRYMPI_FMT_HEX) { - unsigned char *tmp; - int i; - int extra = 0; - unsigned int n = 0; - - tmp = mpi_get_buffer(a, &n, NULL); - if (!tmp) - return -EINVAL; - if (!n || (*tmp & 0x80)) - extra = 2; - - if (buffer && 2*n + extra + negative + 1 > len) { - kfree(tmp); - return -E2BIG; - } - if (buffer) { - unsigned char *s = buffer; - - if (negative) - *s++ = '-'; - if (extra) { - *s++ = '0'; - *s++ = '0'; - } - - for (i = 0; i < n; i++) { - unsigned int c = tmp[i]; - - *s++ = (c >> 4) < 10 ? '0'+(c>>4) : 'A'+(c>>4)-10; - c &= 15; - *s++ = c < 10 ? '0'+c : 'A'+c-10; - } - *s++ = 0; - *nwritten = s - buffer; - } else { - *nwritten = 2*n + extra + negative + 1; - } - kfree(tmp); - return 0; - } else - return -EINVAL; -} -EXPORT_SYMBOL_GPL(mpi_print); diff --git a/lib/crypto/mpi/mpih-mul.c b/lib/crypto/mpi/mpih-mul.c index e5f1c84e3c48..a93647564054 100644 --- a/lib/crypto/mpi/mpih-mul.c +++ b/lib/crypto/mpi/mpih-mul.c @@ -317,31 +317,6 @@ mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace) } } - -void mpihelp_mul_n(mpi_ptr_t prodp, - mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size) -{ - if (up == vp) { - if (size < KARATSUBA_THRESHOLD) - mpih_sqr_n_basecase(prodp, up, size); - else { - mpi_ptr_t tspace; - tspace = mpi_alloc_limb_space(2 * size); - mpih_sqr_n(prodp, up, size, tspace); - mpi_free_limb_space(tspace); - } - } else { - if (size < KARATSUBA_THRESHOLD) - mul_n_basecase(prodp, up, vp, size); - else { - mpi_ptr_t tspace; - tspace = mpi_alloc_limb_space(2 * size); - mul_n(prodp, up, vp, size, tspace); - 
mpi_free_limb_space(tspace); - } - } -} - int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, diff --git a/lib/crypto/mpi/mpiutil.c b/lib/crypto/mpi/mpiutil.c index aa8c46544af8..d57fd8afef64 100644 --- a/lib/crypto/mpi/mpiutil.c +++ b/lib/crypto/mpi/mpiutil.c @@ -20,63 +20,6 @@ #include "mpi-internal.h" -/* Constants allocated right away at startup. */ -static MPI constants[MPI_NUMBER_OF_CONSTANTS]; - -/* Initialize the MPI subsystem. This is called early and allows to - * do some initialization without taking care of threading issues. - */ -static int __init mpi_init(void) -{ - int idx; - unsigned long value; - - for (idx = 0; idx < MPI_NUMBER_OF_CONSTANTS; idx++) { - switch (idx) { - case MPI_C_ZERO: - value = 0; - break; - case MPI_C_ONE: - value = 1; - break; - case MPI_C_TWO: - value = 2; - break; - case MPI_C_THREE: - value = 3; - break; - case MPI_C_FOUR: - value = 4; - break; - case MPI_C_EIGHT: - value = 8; - break; - default: - pr_err("MPI: invalid mpi_const selector %d\n", idx); - return -EFAULT; - } - constants[idx] = mpi_alloc_set_ui(value); - constants[idx]->flags = (16|32); - } - - return 0; -} -postcore_initcall(mpi_init); - -/* Return a constant MPI descripbed by NO which is one of the - * MPI_C_xxx macros. There is no need to copy this returned value; it - * may be used directly. 
- */ -MPI mpi_const(enum gcry_mpi_constants no) -{ - if ((int)no < 0 || no > MPI_NUMBER_OF_CONSTANTS) - pr_err("MPI: invalid mpi_const selector %d\n", no); - if (!constants[no]) - pr_err("MPI: MPI subsystem not initialized\n"); - return constants[no]; -} -EXPORT_SYMBOL_GPL(mpi_const); - /**************** * Note: It was a bad idea to use the number of limbs to allocate * because on a alpha the limbs are large but we normally need @@ -163,15 +106,6 @@ int mpi_resize(MPI a, unsigned nlimbs) return 0; } -void mpi_clear(MPI a) -{ - if (!a) - return; - a->nlimbs = 0; - a->flags = 0; -} -EXPORT_SYMBOL_GPL(mpi_clear); - void mpi_free(MPI a) { if (!a) @@ -210,121 +144,5 @@ MPI mpi_copy(MPI a) return b; } -/**************** - * This function allocates an MPI which is optimized to hold - * a value as large as the one given in the argument and allocates it - * with the same flags as A. - */ -MPI mpi_alloc_like(MPI a) -{ - MPI b; - - if (a) { - b = mpi_alloc(a->nlimbs); - b->nlimbs = 0; - b->sign = 0; - b->flags = a->flags; - } else - b = NULL; - - return b; -} - - -/* Set U into W and release U. If W is NULL only U will be released. */ -void mpi_snatch(MPI w, MPI u) -{ - if (w) { - mpi_assign_limb_space(w, u->d, u->alloced); - w->nlimbs = u->nlimbs; - w->sign = u->sign; - w->flags = u->flags; - u->alloced = 0; - u->nlimbs = 0; - u->d = NULL; - } - mpi_free(u); -} - - -MPI mpi_set(MPI w, MPI u) -{ - mpi_ptr_t wp, up; - mpi_size_t usize = u->nlimbs; - int usign = u->sign; - - if (!w) - w = mpi_alloc(mpi_get_nlimbs(u)); - RESIZE_IF_NEEDED(w, usize); - wp = w->d; - up = u->d; - MPN_COPY(wp, up, usize); - w->nlimbs = usize; - w->flags = u->flags; - w->flags &= ~(16|32); /* Reset the immutable and constant flags. */ - w->sign = usign; - return w; -} -EXPORT_SYMBOL_GPL(mpi_set); - -MPI mpi_set_ui(MPI w, unsigned long u) -{ - if (!w) - w = mpi_alloc(1); - /* FIXME: If U is 0 we have no need to resize and thus possible - * allocating the limbs. 
- */ - RESIZE_IF_NEEDED(w, 1); - w->d[0] = u; - w->nlimbs = u ? 1 : 0; - w->sign = 0; - w->flags = 0; - return w; -} -EXPORT_SYMBOL_GPL(mpi_set_ui); - -MPI mpi_alloc_set_ui(unsigned long u) -{ - MPI w = mpi_alloc(1); - w->d[0] = u; - w->nlimbs = u ? 1 : 0; - w->sign = 0; - return w; -} - -/**************** - * Swap the value of A and B, when SWAP is 1. - * Leave the value when SWAP is 0. - * This implementation should be constant-time regardless of SWAP. - */ -void mpi_swap_cond(MPI a, MPI b, unsigned long swap) -{ - mpi_size_t i; - mpi_size_t nlimbs; - mpi_limb_t mask = ((mpi_limb_t)0) - swap; - mpi_limb_t x; - - if (a->alloced > b->alloced) - nlimbs = b->alloced; - else - nlimbs = a->alloced; - if (a->nlimbs > nlimbs || b->nlimbs > nlimbs) - return; - - for (i = 0; i < nlimbs; i++) { - x = mask & (a->d[i] ^ b->d[i]); - a->d[i] = a->d[i] ^ x; - b->d[i] = b->d[i] ^ x; - } - - x = mask & (a->nlimbs ^ b->nlimbs); - a->nlimbs = a->nlimbs ^ x; - b->nlimbs = b->nlimbs ^ x; - - x = mask & (a->sign ^ b->sign); - a->sign = a->sign ^ x; - b->sign = b->sign ^ x; -} - MODULE_DESCRIPTION("Multiprecision maths library"); MODULE_LICENSE("GPL"); From 8e3a67f2de87ee94ac11ea69beb4edc2993b17a0 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 10 Aug 2024 14:20:57 +0800 Subject: [PATCH 33/96] crypto: lib/mpi - Add error checks to extension The remaining functions added by commit a8ea8bdd9df92a0e5db5b43900abb7a288b8a53e did not check for memory allocation errors. Add the checks and change the API to allow errors to be returned. 
Fixes: a8ea8bdd9df9 ("lib/mpi: Extend the MPI library") Signed-off-by: Herbert Xu --- include/linux/mpi.h | 22 +++++++------- lib/crypto/mpi/mpi-add.c | 38 ++++++++++++++++-------- lib/crypto/mpi/mpi-bit.c | 25 +++++++++++----- lib/crypto/mpi/mpi-div.c | 55 +++++++++++++++++++++++++---------- lib/crypto/mpi/mpi-internal.h | 11 +++---- lib/crypto/mpi/mpi-mod.c | 4 +-- lib/crypto/mpi/mpi-mul.c | 29 ++++++++++++++---- lib/crypto/mpi/mpiutil.c | 2 ++ 8 files changed, 128 insertions(+), 58 deletions(-) diff --git a/include/linux/mpi.h b/include/linux/mpi.h index e081428b91ef..47be46f36435 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -59,7 +59,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes, int *sign); /*-- mpi-mod.c --*/ -void mpi_mod(MPI rem, MPI dividend, MPI divisor); +int mpi_mod(MPI rem, MPI dividend, MPI divisor); /*-- mpi-pow.c --*/ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); @@ -75,22 +75,22 @@ int mpi_sub_ui(MPI w, MPI u, unsigned long vval); void mpi_normalize(MPI a); unsigned mpi_get_nbits(MPI a); int mpi_test_bit(MPI a, unsigned int n); -void mpi_set_bit(MPI a, unsigned int n); -void mpi_rshift(MPI x, MPI a, unsigned int n); +int mpi_set_bit(MPI a, unsigned int n); +int mpi_rshift(MPI x, MPI a, unsigned int n); /*-- mpi-add.c --*/ -void mpi_add(MPI w, MPI u, MPI v); -void mpi_sub(MPI w, MPI u, MPI v); -void mpi_addm(MPI w, MPI u, MPI v, MPI m); -void mpi_subm(MPI w, MPI u, MPI v, MPI m); +int mpi_add(MPI w, MPI u, MPI v); +int mpi_sub(MPI w, MPI u, MPI v); +int mpi_addm(MPI w, MPI u, MPI v, MPI m); +int mpi_subm(MPI w, MPI u, MPI v, MPI m); /*-- mpi-mul.c --*/ -void mpi_mul(MPI w, MPI u, MPI v); -void mpi_mulm(MPI w, MPI u, MPI v, MPI m); +int mpi_mul(MPI w, MPI u, MPI v); +int mpi_mulm(MPI w, MPI u, MPI v, MPI m); /*-- mpi-div.c --*/ -void mpi_tdiv_r(MPI rem, MPI num, MPI den); -void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor); +int mpi_tdiv_r(MPI rem, MPI num, MPI den); +int mpi_fdiv_r(MPI rem, MPI 
dividend, MPI divisor); /* inline functions */ diff --git a/lib/crypto/mpi/mpi-add.c b/lib/crypto/mpi/mpi-add.c index b47c8c35f5fe..3015140d4860 100644 --- a/lib/crypto/mpi/mpi-add.c +++ b/lib/crypto/mpi/mpi-add.c @@ -13,11 +13,12 @@ #include "mpi-internal.h" -void mpi_add(MPI w, MPI u, MPI v) +int mpi_add(MPI w, MPI u, MPI v) { mpi_ptr_t wp, up, vp; mpi_size_t usize, vsize, wsize; int usign, vsign, wsign; + int err; if (u->nlimbs < v->nlimbs) { /* Swap U and V. */ usize = v->nlimbs; @@ -25,7 +26,9 @@ void mpi_add(MPI w, MPI u, MPI v) vsize = u->nlimbs; vsign = u->sign; wsize = usize + 1; - RESIZE_IF_NEEDED(w, wsize); + err = RESIZE_IF_NEEDED(w, wsize); + if (err) + return err; /* These must be after realloc (u or v may be the same as w). */ up = v->d; vp = u->d; @@ -35,7 +38,9 @@ void mpi_add(MPI w, MPI u, MPI v) vsize = v->nlimbs; vsign = v->sign; wsize = usize + 1; - RESIZE_IF_NEEDED(w, wsize); + err = RESIZE_IF_NEEDED(w, wsize); + if (err) + return err; /* These must be after realloc (u or v may be the same as w). 
*/ up = u->d; vp = v->d; @@ -77,28 +82,37 @@ void mpi_add(MPI w, MPI u, MPI v) w->nlimbs = wsize; w->sign = wsign; + return 0; } EXPORT_SYMBOL_GPL(mpi_add); -void mpi_sub(MPI w, MPI u, MPI v) +int mpi_sub(MPI w, MPI u, MPI v) { - MPI vv = mpi_copy(v); + int err; + MPI vv; + + vv = mpi_copy(v); + if (!vv) + return -ENOMEM; + vv->sign = !vv->sign; - mpi_add(w, u, vv); + err = mpi_add(w, u, vv); mpi_free(vv); + + return err; } EXPORT_SYMBOL_GPL(mpi_sub); -void mpi_addm(MPI w, MPI u, MPI v, MPI m) +int mpi_addm(MPI w, MPI u, MPI v, MPI m) { - mpi_add(w, u, v); - mpi_mod(w, w, m); + return mpi_add(w, u, v) ?: + mpi_mod(w, w, m); } EXPORT_SYMBOL_GPL(mpi_addm); -void mpi_subm(MPI w, MPI u, MPI v, MPI m) +int mpi_subm(MPI w, MPI u, MPI v, MPI m) { - mpi_sub(w, u, v); - mpi_mod(w, w, m); + return mpi_sub(w, u, v) ?: + mpi_mod(w, w, m); } EXPORT_SYMBOL_GPL(mpi_subm); diff --git a/lib/crypto/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c index c29b85362664..835a2f0622a0 100644 --- a/lib/crypto/mpi/mpi-bit.c +++ b/lib/crypto/mpi/mpi-bit.c @@ -76,9 +76,10 @@ EXPORT_SYMBOL_GPL(mpi_test_bit); /**************** * Set bit N of A. */ -void mpi_set_bit(MPI a, unsigned int n) +int mpi_set_bit(MPI a, unsigned int n) { unsigned int i, limbno, bitno; + int err; limbno = n / BITS_PER_MPI_LIMB; bitno = n % BITS_PER_MPI_LIMB; @@ -86,27 +87,31 @@ void mpi_set_bit(MPI a, unsigned int n) if (limbno >= a->nlimbs) { for (i = a->nlimbs; i < a->alloced; i++) a->d[i] = 0; - mpi_resize(a, limbno+1); + err = mpi_resize(a, limbno+1); + if (err) + return err; a->nlimbs = limbno+1; } a->d[limbno] |= (A_LIMB_1<= x->nlimbs) { x->nlimbs = 0; - return; + return 0; } if (nlimbs) { @@ -121,7 +126,9 @@ void mpi_rshift(MPI x, MPI a, unsigned int n) /* Copy and shift by more or equal bits than in a limb. 
*/ xsize = a->nlimbs; x->sign = a->sign; - RESIZE_IF_NEEDED(x, xsize); + err = RESIZE_IF_NEEDED(x, xsize); + if (err) + return err; x->nlimbs = xsize; for (i = 0; i < a->nlimbs; i++) x->d[i] = a->d[i]; @@ -129,7 +136,7 @@ void mpi_rshift(MPI x, MPI a, unsigned int n) if (nlimbs >= x->nlimbs) { x->nlimbs = 0; - return; + return 0; } for (i = 0; i < x->nlimbs - nlimbs; i++) @@ -143,7 +150,9 @@ void mpi_rshift(MPI x, MPI a, unsigned int n) /* Copy and shift by less than bits in a limb. */ xsize = a->nlimbs; x->sign = a->sign; - RESIZE_IF_NEEDED(x, xsize); + err = RESIZE_IF_NEEDED(x, xsize); + if (err) + return err; x->nlimbs = xsize; if (xsize) { @@ -159,5 +168,7 @@ void mpi_rshift(MPI x, MPI a, unsigned int n) } } MPN_NORMALIZE(x->d, x->nlimbs); + + return 0; } EXPORT_SYMBOL_GPL(mpi_rshift); diff --git a/lib/crypto/mpi/mpi-div.c b/lib/crypto/mpi/mpi-div.c index 2ff0ebd74fd7..6e5044e72595 100644 --- a/lib/crypto/mpi/mpi-div.c +++ b/lib/crypto/mpi/mpi-div.c @@ -14,12 +14,13 @@ #include "mpi-internal.h" #include "longlong.h" -void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den); +int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den); -void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor) +int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor) { int divisor_sign = divisor->sign; MPI temp_divisor = NULL; + int err; /* We need the original value of the divisor after the remainder has been * preliminary calculated. 
We have to copy it to temporary space if it's @@ -27,16 +28,22 @@ void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor) */ if (rem == divisor) { temp_divisor = mpi_copy(divisor); + if (!temp_divisor) + return -ENOMEM; divisor = temp_divisor; } - mpi_tdiv_r(rem, dividend, divisor); + err = mpi_tdiv_r(rem, dividend, divisor); + if (err) + goto free_temp_divisor; if (((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs) - mpi_add(rem, rem, divisor); + err = mpi_add(rem, rem, divisor); - if (temp_divisor) - mpi_free(temp_divisor); +free_temp_divisor: + mpi_free(temp_divisor); + + return err; } /* If den == quot, den needs temporary storage. @@ -46,12 +53,12 @@ void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor) * i.e no extra storage should be allocated. */ -void mpi_tdiv_r(MPI rem, MPI num, MPI den) +int mpi_tdiv_r(MPI rem, MPI num, MPI den) { - mpi_tdiv_qr(NULL, rem, num, den); + return mpi_tdiv_qr(NULL, rem, num, den); } -void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den) +int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den) { mpi_ptr_t np, dp; mpi_ptr_t qp, rp; @@ -64,13 +71,16 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den) mpi_limb_t q_limb; mpi_ptr_t marker[5]; int markidx = 0; + int err; /* Ensure space is enough for quotient and remainder. * We need space for an extra limb in the remainder, because it's * up-shifted (normalized) below. */ rsize = nsize + 1; - mpi_resize(rem, rsize); + err = mpi_resize(rem, rsize); + if (err) + return err; qsize = rsize - dsize; /* qsize cannot be bigger than this. */ if (qsize <= 0) { @@ -86,11 +96,14 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den) quot->nlimbs = 0; quot->sign = 0; } - return; + return 0; } - if (quot) - mpi_resize(quot, qsize); + if (quot) { + err = mpi_resize(quot, qsize); + if (err) + return err; + } /* Read pointers here, when reallocation is finished. 
*/ np = num->d; @@ -112,10 +125,10 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den) rsize = rlimb != 0?1:0; rem->nlimbs = rsize; rem->sign = sign_remainder; - return; + return 0; } - + err = -ENOMEM; if (quot) { qp = quot->d; /* Make sure QP and NP point to different objects. Otherwise the @@ -123,6 +136,8 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den) */ if (qp == np) { /* Copy NP object to temporary space. */ np = marker[markidx++] = mpi_alloc_limb_space(nsize); + if (!np) + goto out_free_marker; MPN_COPY(np, qp, nsize); } } else /* Put quotient at top of remainder. */ @@ -143,6 +158,8 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den) * the original contents of the denominator. */ tp = marker[markidx++] = mpi_alloc_limb_space(dsize); + if (!tp) + goto out_free_marker; mpihelp_lshift(tp, dp, dsize, normalization_steps); dp = tp; @@ -164,6 +181,8 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den) mpi_ptr_t tp; tp = marker[markidx++] = mpi_alloc_limb_space(dsize); + if (!tp) + goto out_free_marker; MPN_COPY(tp, dp, dsize); dp = tp; } @@ -198,8 +217,14 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den) rem->nlimbs = rsize; rem->sign = sign_remainder; + + err = 0; + +out_free_marker: while (markidx) { markidx--; mpi_free_limb_space(marker[markidx]); } + + return err; } diff --git a/lib/crypto/mpi/mpi-internal.h b/lib/crypto/mpi/mpi-internal.h index b6fbb43afbc8..8a4f49e3043c 100644 --- a/lib/crypto/mpi/mpi-internal.h +++ b/lib/crypto/mpi/mpi-internal.h @@ -52,11 +52,12 @@ typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */ typedef int mpi_size_t; /* (must be a signed type) */ -#define RESIZE_IF_NEEDED(a, b) \ - do { \ - if ((a)->alloced < (b)) \ - mpi_resize((a), (b)); \ - } while (0) +static inline int RESIZE_IF_NEEDED(MPI a, unsigned b) +{ + if (a->alloced < b) + return mpi_resize(a, b); + return 0; +} /* Copy N limbs from S to D. 
*/ #define MPN_COPY(d, s, n) \ diff --git a/lib/crypto/mpi/mpi-mod.c b/lib/crypto/mpi/mpi-mod.c index 691bbdc52fc6..d5fdaec3d0b6 100644 --- a/lib/crypto/mpi/mpi-mod.c +++ b/lib/crypto/mpi/mpi-mod.c @@ -7,7 +7,7 @@ #include "mpi-internal.h" -void mpi_mod(MPI rem, MPI dividend, MPI divisor) +int mpi_mod(MPI rem, MPI dividend, MPI divisor) { - mpi_fdiv_r(rem, dividend, divisor); + return mpi_fdiv_r(rem, dividend, divisor); } diff --git a/lib/crypto/mpi/mpi-mul.c b/lib/crypto/mpi/mpi-mul.c index 7f4eda8560dc..892a246216b9 100644 --- a/lib/crypto/mpi/mpi-mul.c +++ b/lib/crypto/mpi/mpi-mul.c @@ -13,7 +13,7 @@ #include "mpi-internal.h" -void mpi_mul(MPI w, MPI u, MPI v) +int mpi_mul(MPI w, MPI u, MPI v) { mpi_size_t usize, vsize, wsize; mpi_ptr_t up, vp, wp; @@ -21,6 +21,7 @@ void mpi_mul(MPI w, MPI u, MPI v) int usign, vsign, sign_product; int assign_wp = 0; mpi_ptr_t tmp_limb = NULL; + int err; if (u->nlimbs < v->nlimbs) { /* Swap U and V. */ @@ -46,15 +47,21 @@ void mpi_mul(MPI w, MPI u, MPI v) if (w->alloced < wsize) { if (wp == up || wp == vp) { wp = mpi_alloc_limb_space(wsize); + if (!wp) + return -ENOMEM; assign_wp = 1; } else { - mpi_resize(w, wsize); + err = mpi_resize(w, wsize); + if (err) + return err; wp = w->d; } } else { /* Make U and V not overlap with W. */ if (wp == up) { /* W and U are identical. Allocate temporary space for U. */ up = tmp_limb = mpi_alloc_limb_space(usize); + if (!up) + return -ENOMEM; /* Is V identical too? Keep it identical with U. */ if (wp == vp) vp = up; @@ -63,6 +70,8 @@ void mpi_mul(MPI w, MPI u, MPI v) } else if (wp == vp) { /* W and V are identical. Allocate temporary space for V. */ vp = tmp_limb = mpi_alloc_limb_space(vsize); + if (!vp) + return -ENOMEM; /* Copy to the temporary space. 
*/ MPN_COPY(vp, wp, vsize); } @@ -71,7 +80,12 @@ void mpi_mul(MPI w, MPI u, MPI v) if (!vsize) wsize = 0; else { - mpihelp_mul(wp, up, usize, vp, vsize, &cy); + err = mpihelp_mul(wp, up, usize, vp, vsize, &cy); + if (err) { + if (assign_wp) + mpi_free_limb_space(wp); + goto free_tmp_limb; + } wsize -= cy ? 0:1; } @@ -79,14 +93,17 @@ void mpi_mul(MPI w, MPI u, MPI v) mpi_assign_limb_space(w, wp, wsize); w->nlimbs = wsize; w->sign = sign_product; + +free_tmp_limb: if (tmp_limb) mpi_free_limb_space(tmp_limb); + return err; } EXPORT_SYMBOL_GPL(mpi_mul); -void mpi_mulm(MPI w, MPI u, MPI v, MPI m) +int mpi_mulm(MPI w, MPI u, MPI v, MPI m) { - mpi_mul(w, u, v); - mpi_tdiv_r(w, w, m); + return mpi_mul(w, u, v) ?: + mpi_tdiv_r(w, w, m); } EXPORT_SYMBOL_GPL(mpi_mulm); diff --git a/lib/crypto/mpi/mpiutil.c b/lib/crypto/mpi/mpiutil.c index d57fd8afef64..979ece5a81d2 100644 --- a/lib/crypto/mpi/mpiutil.c +++ b/lib/crypto/mpi/mpiutil.c @@ -133,6 +133,8 @@ MPI mpi_copy(MPI a) if (a) { b = mpi_alloc(a->nlimbs); + if (!b) + return NULL; b->nlimbs = a->nlimbs; b->sign = a->sign; b->flags = a->flags; From 560efa7fca4f4f5bd390e54d43cd2cd1831c60cd Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 10 Aug 2024 14:20:59 +0800 Subject: [PATCH 34/96] crypto: dh - Check mpi_rshift errors Now that mpi_rshift can return errors, check them. Fixes: 35d2bf20683f ("crypto: dh - calculate Q from P for the full public key verification") Signed-off-by: Herbert Xu --- crypto/dh.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/dh.c b/crypto/dh.c index 68d11d66c0b5..afc0fd847761 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -145,9 +145,9 @@ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y) * ->p is odd, so no need to explicitly subtract one * from it before shifting to the right. 
*/ - mpi_rshift(q, ctx->p, 1); + ret = mpi_rshift(q, ctx->p, 1) ?: + mpi_powm(val, y, q, ctx->p); - ret = mpi_powm(val, y, q, ctx->p); mpi_free(q); if (ret) { mpi_free(val); From 5a72a244bac3e8663834d88bb0b4f9069203e5e0 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 10 Aug 2024 14:21:02 +0800 Subject: [PATCH 35/96] crypto: rsa - Check MPI allocation errors Fixes: 6637e11e4ad2 ("crypto: rsa - allow only odd e and restrict value in FIPS mode") Fixes: f145d411a67e ("crypto: rsa - implement Chinese Remainder Theorem for faster private key operation") Signed-off-by: Herbert Xu --- crypto/rsa.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/crypto/rsa.c b/crypto/rsa.c index d9be9e86097e..78b28d14ced3 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -98,14 +98,13 @@ static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c) goto err_free_mpi; /* (2iii) h = (m_1 - m_2) * qInv mod p */ - mpi_sub(m12_or_qh, m_or_m1_or_h, m2); - mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p); + ret = mpi_sub(m12_or_qh, m_or_m1_or_h, m2) ?: + mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p); /* (2iv) m = m_2 + q * h */ - mpi_mul(m12_or_qh, key->q, m_or_m1_or_h); - mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n); - - ret = 0; + ret = ret ?: + mpi_mul(m12_or_qh, key->q, m_or_m1_or_h) ?: + mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n); err_free_mpi: mpi_free(m12_or_qh); @@ -236,6 +235,7 @@ static int rsa_check_key_length(unsigned int len) static int rsa_check_exponent_fips(MPI e) { MPI e_max = NULL; + int err; /* check if odd */ if (!mpi_test_bit(e, 0)) { @@ -250,7 +250,12 @@ static int rsa_check_exponent_fips(MPI e) e_max = mpi_alloc(0); if (!e_max) return -ENOMEM; - mpi_set_bit(e_max, 256); + + err = mpi_set_bit(e_max, 256); + if (err) { + mpi_free(e_max); + return err; + } if (mpi_cmp(e, e_max) >= 0) { mpi_free(e_max); From a304393a9d950ae498151890d6cacc83909d90a4 Mon Sep 17 00:00:00 2001 From: Kuan-Wei Chiu Date: Sun, 
11 Aug 2024 14:28:16 +0800 Subject: [PATCH 36/96] crypto: octeontx - Remove custom swap function in favor of built-in sort swap The custom swap function used in octeontx driver do not perform any special operations and can be replaced with the built-in swap function of sort. This change not only reduces code size but also improves efficiency, especially in scenarios where CONFIG_RETPOLINE is enabled, as it makes indirect function calls more expensive. By using the built-in swap, we avoid these costly indirect function calls, leading to better performance. Signed-off-by: Kuan-Wei Chiu Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cptvf_algs.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 3c5d577d8f0d..e53c79fe6342 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -1613,14 +1613,6 @@ static int compare_func(const void *lptr, const void *rptr) return 0; } -static void swap_func(void *lptr, void *rptr, int size) -{ - struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr; - struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr; - - swap(*ldesc, *rdesc); -} - int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, enum otx_cptpf_type pf_type, enum otx_cptvf_type engine_type, @@ -1655,7 +1647,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, is_crypto_registered = true; } sort(se_devices.desc, count, sizeof(struct cpt_device_desc), - compare_func, swap_func); + compare_func, NULL); break; case OTX_CPT_AE_TYPES: @@ -1670,7 +1662,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, ae_devices.desc[count++].dev = pdev; atomic_inc(&ae_devices.count); sort(ae_devices.desc, count, sizeof(struct cpt_device_desc), - compare_func, swap_func); + compare_func, NULL); break; default: From 
8e84a650079a0044374f57af8d24b367fc795340 Mon Sep 17 00:00:00 2001 From: Kuan-Wei Chiu Date: Sun, 11 Aug 2024 14:28:17 +0800 Subject: [PATCH 37/96] crypto: octeontx2 - Remove custom swap functions in favor of built-in sort swap The custom swap functions used in octeontx2 driver do not perform any special operations and can be replaced with the built-in swap function of sort. This change not only reduces code size but also improves efficiency, especially in scenarios where CONFIG_RETPOLINE is enabled, as it makes indirect function calls more expensive. By using the built-in swap, we avoid these costly indirect function calls, leading to better performance. Signed-off-by: Kuan-Wei Chiu Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c index 1604fc58dc13..ff7cc8c13e73 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c @@ -1702,14 +1702,6 @@ static int compare_func(const void *lptr, const void *rptr) return 0; } -static void swap_func(void *lptr, void *rptr, int size) -{ - struct cpt_device_desc *ldesc = lptr; - struct cpt_device_desc *rdesc = rptr; - - swap(*ldesc, *rdesc); -} - int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, int num_queues, int num_devices) { @@ -1739,7 +1731,7 @@ int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, is_crypto_registered = true; } sort(se_devices.desc, count, sizeof(struct cpt_device_desc), - compare_func, swap_func); + compare_func, NULL); unlock: mutex_unlock(&mutex); From 95a798d20060d2b648dd604321e347c85edfd783 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Mon, 12 Aug 2024 08:25:42 +0200 Subject: [PATCH 38/96] crypto: jitter - set default OSR to 3 The user space Jitter RNG library uses the oversampling rate of 3 
which implies that each time stamp is credited with 1/3 bit of entropy. To obtain 256 bits of entropy, 768 time stamps need to be sampled. The increase in OSR is applied based on a report where the Jitter RNG is used on a system exhibiting a challenging environment to collect entropy. This OSR default value is now applied to the Linux kernel version of the Jitter RNG as well. The increase in the OSR from 1 to 3 also implies that the Jitter RNG is now slower by default. Reported-by: Jeff Barnes Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 72e2decb8c6a..a779cab668c2 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1305,7 +1305,7 @@ config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE config CRYPTO_JITTERENTROPY_OSR int "CPU Jitter RNG Oversampling Rate" range 1 15 - default 1 + default 3 help The Jitter RNG allows the specification of an oversampling rate (OSR). The Jitter RNG operation requires a fixed amount of timing From 86c85d6657e478c516c825d3d4a2a61aef627934 Mon Sep 17 00:00:00 2001 From: Liao Chen Date: Wed, 14 Aug 2024 02:44:06 +0000 Subject: [PATCH 39/96] crypto: keembay - fix module autoloading Add MODULE_DEVICE_TABLE(), so modules could be properly autoloaded based on the alias from of_device_id table. 
Signed-off-by: Liao Chen Acked-by: Daniele Alessandrelli Signed-off-by: Herbert Xu --- drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c index c2dfca73fe4e..e54c79890d44 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c @@ -1150,6 +1150,7 @@ static const struct of_device_id kmb_ocs_hcu_of_match[] = { }, {} }; +MODULE_DEVICE_TABLE(of, kmb_ocs_hcu_of_match); static void kmb_ocs_hcu_remove(struct platform_device *pdev) { From 3363c460ef726ba693704dbcd73b7e7214ccc788 Mon Sep 17 00:00:00 2001 From: Fangrui Song Date: Tue, 13 Aug 2024 21:48:02 -0700 Subject: [PATCH 40/96] crypto: x86/sha256 - Add parentheses around macros' single arguments The macros FOUR_ROUNDS_AND_SCHED and DO_4ROUNDS rely on an unexpected/undocumented behavior of the GNU assembler, which might change in the future (https://sourceware.org/bugzilla/show_bug.cgi?id=32073). M (1) (2) // 1 arg !? Future: 2 args M 1 + 2 // 1 arg !? Future: 3 args M 1 2 // 2 args Add parentheses around the single arguments to support future GNU assembler and LLVM integrated assembler (when the IsOperator hack from the following link is dropped). 
Link: https://github.com/llvm/llvm-project/commit/055006475e22014b28a070db1bff41ca15f322f0 Signed-off-by: Fangrui Song Reviewed-by: Jan Beulich Signed-off-by: Herbert Xu --- arch/x86/crypto/sha256-avx2-asm.S | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 0ffb072be956..0bbec1c75cd0 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -592,22 +592,22 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx) leaq K256+0*32(%rip), INP ## reuse INP as scratch reg vpaddd (INP, SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) - FOUR_ROUNDS_AND_SCHED _XFER + 0*32 + FOUR_ROUNDS_AND_SCHED (_XFER + 0*32) leaq K256+1*32(%rip), INP vpaddd (INP, SRND), X0, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) - FOUR_ROUNDS_AND_SCHED _XFER + 1*32 + FOUR_ROUNDS_AND_SCHED (_XFER + 1*32) leaq K256+2*32(%rip), INP vpaddd (INP, SRND), X0, XFER vmovdqa XFER, 2*32+_XFER(%rsp, SRND) - FOUR_ROUNDS_AND_SCHED _XFER + 2*32 + FOUR_ROUNDS_AND_SCHED (_XFER + 2*32) leaq K256+3*32(%rip), INP vpaddd (INP, SRND), X0, XFER vmovdqa XFER, 3*32+_XFER(%rsp, SRND) - FOUR_ROUNDS_AND_SCHED _XFER + 3*32 + FOUR_ROUNDS_AND_SCHED (_XFER + 3*32) add $4*32, SRND cmp $3*4*32, SRND @@ -618,12 +618,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx) leaq K256+0*32(%rip), INP vpaddd (INP, SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) - DO_4ROUNDS _XFER + 0*32 + DO_4ROUNDS (_XFER + 0*32) leaq K256+1*32(%rip), INP vpaddd (INP, SRND), X1, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) - DO_4ROUNDS _XFER + 1*32 + DO_4ROUNDS (_XFER + 1*32) add $2*32, SRND vmovdqa X2, X0 @@ -651,8 +651,8 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx) xor SRND, SRND .align 16 .Lloop3: - DO_4ROUNDS _XFER + 0*32 + 16 - DO_4ROUNDS _XFER + 1*32 + 16 + DO_4ROUNDS (_XFER + 0*32 + 16) + DO_4ROUNDS (_XFER + 1*32 + 16) add $2*32, SRND cmp $4*4*32, SRND jb .Lloop3 From 142a794bcf007a22a5b14700e26d740b28d90754 Mon Sep 17 
00:00:00 2001 From: Tom Lendacky Date: Wed, 14 Aug 2024 15:42:40 -0500 Subject: [PATCH 41/96] crypto: ccp - Add additional information about an SEV firmware upgrade Print additional information, in the form of the old and new versions of the SEV firmware, so that it can be seen what the base firmware was before the upgrade. Signed-off-by: Tom Lendacky Reviewed-by: Ashish Kalra Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 9810edbb272d..3b89f23afcd7 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1629,8 +1629,6 @@ static int sev_update_firmware(struct device *dev) if (ret) dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); - else - dev_info(dev, "SEV firmware update successful\n"); __free_pages(p, order); @@ -2382,6 +2380,7 @@ void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; struct sev_platform_init_args args = {0}; + u8 api_major, api_minor, build; int rc; if (!sev) @@ -2392,9 +2391,19 @@ void sev_pci_init(void) if (sev_get_api_version()) goto err; + api_major = sev->api_major; + api_minor = sev->api_minor; + build = sev->build; + if (sev_update_firmware(sev->dev) == 0) sev_get_api_version(); + if (api_major != sev->api_major || api_minor != sev->api_minor || + build != sev->build) + dev_info(sev->dev, "SEV firmware updated from %d.%d.%d to %d.%d.%d\n", + api_major, api_minor, build, + sev->api_major, sev->api_minor, sev->build); + /* Initialize the platform */ args.probe = true; rc = sev_platform_init(&args); From b63483b37e813299445d2719488acab2b3f20544 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 15 Aug 2024 00:11:51 +0300 Subject: [PATCH 42/96] crypto: spacc - Fix bounds checking on spacc->job[] This bounds checking is off by one. The > should be >=. 
The spacc->job[] array is allocated in spacc_init() and it has SPACC_MAX_JOBS elements. Fixes: 8ebb14deef0f ("crypto: spacc - Enable SPAcc AUTODETECT") Fixes: c8981d9230d8 ("crypto: spacc - Add SPAcc Skcipher support") Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/dwc-spacc/spacc_core.c b/drivers/crypto/dwc-spacc/spacc_core.c index 9bc49de06bb2..e3380528e82b 100644 --- a/drivers/crypto/dwc-spacc/spacc_core.c +++ b/drivers/crypto/dwc-spacc/spacc_core.c @@ -1024,7 +1024,7 @@ int spacc_set_operation(struct spacc_device *spacc, int handle, int op, int ret = CRYPTO_OK; struct spacc_job *job = NULL; - if (handle < 0 || handle > SPACC_MAX_JOBS) + if (handle < 0 || handle >= SPACC_MAX_JOBS) return -ENXIO; job = &spacc->job[handle]; @@ -1105,7 +1105,7 @@ int spacc_packet_enqueue_ddt_ex(struct spacc_device *spacc, int use_jb, struct spacc_job *job; int ret = CRYPTO_OK, proc_len; - if (job_idx < 0 || job_idx > SPACC_MAX_JOBS) + if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) return -ENXIO; switch (prio) { @@ -1331,7 +1331,7 @@ static int spacc_set_auxinfo(struct spacc_device *spacc, int jobid, int ret = CRYPTO_OK; struct spacc_job *job; - if (jobid < 0 || jobid > SPACC_MAX_JOBS) + if (jobid < 0 || jobid >= SPACC_MAX_JOBS) return -ENXIO; job = &spacc->job[jobid]; @@ -2364,7 +2364,7 @@ int spacc_set_key_exp(struct spacc_device *spacc, int job_idx) struct spacc_ctx *ctx = NULL; struct spacc_job *job = NULL; - if (job_idx < 0 || job_idx > SPACC_MAX_JOBS) { + if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) { pr_debug("ERR: Invalid Job id specified (out of range)\n"); return -ENXIO; } From 5d22d37aa8b93efaad797faf80db40ea59453481 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 15 Aug 2024 00:11:58 +0300 Subject: [PATCH 43/96] crypto: spacc - Fix off by one in spacc_isenabled() The spacc->config.modes[] array has CRYPTO_MODE_LAST number of 
elements so this > comparison should be >= to prevent an out of bounds access. Fixes: c8981d9230d8 ("crypto: spacc - Add SPAcc Skcipher support") Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/dwc-spacc/spacc_core.c b/drivers/crypto/dwc-spacc/spacc_core.c index e3380528e82b..b7630f559973 100644 --- a/drivers/crypto/dwc-spacc/spacc_core.c +++ b/drivers/crypto/dwc-spacc/spacc_core.c @@ -1295,7 +1295,7 @@ int spacc_isenabled(struct spacc_device *spacc, int mode, int keysize) { int x; - if (mode < 0 || mode > CRYPTO_MODE_LAST) + if (mode < 0 || mode >= CRYPTO_MODE_LAST) return 0; if (mode == CRYPTO_MODE_NULL || From c76c9ec333432088a1c6f52650c149530fc5df5d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 15 Aug 2024 00:12:03 +0300 Subject: [PATCH 44/96] crypto: spacc - Add a new line in spacc_open() The break statement should be on its own line. Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/dwc-spacc/spacc_core.c b/drivers/crypto/dwc-spacc/spacc_core.c index b7630f559973..b9e0d3227f81 100644 --- a/drivers/crypto/dwc-spacc/spacc_core.c +++ b/drivers/crypto/dwc-spacc/spacc_core.c @@ -1904,7 +1904,8 @@ int spacc_open(struct spacc_device *spacc, int enc, int hash, int ctxid, ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHAKE256); ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SHAKE_KMAC); break; + HM_SHAKE_KMAC); + break; case CRYPTO_MODE_MAC_KMACXOF128: ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHAKE128); From f036dd566453176d4eafb9701ebd69e7e59d6707 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 Aug 2024 16:51:38 +0800 Subject: [PATCH 45/96] crypto: spacc - Use crypto_authenc_extractkeys Use the crypto_authenc_extractkeys helper rather than ad-hoc parsing.
Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_aead.c | 33 ++++++++------------------- 1 file changed, 9 insertions(+), 24 deletions(-) diff --git a/drivers/crypto/dwc-spacc/spacc_aead.c b/drivers/crypto/dwc-spacc/spacc_aead.c index 3468ff605957..3a617da9007d 100755 --- a/drivers/crypto/dwc-spacc/spacc_aead.c +++ b/drivers/crypto/dwc-spacc/spacc_aead.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include @@ -540,15 +539,13 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, { struct spacc_crypto_ctx *ctx = crypto_aead_ctx(tfm); const struct spacc_alg *salg = spacc_tfm_aead(&tfm->base); + struct crypto_authenc_keys authenc_keys; struct spacc_priv *priv; - struct rtattr *rta = (void *)key; - struct crypto_authenc_key_param *param; unsigned int authkeylen, enckeylen; const unsigned char *authkey, *enckey; unsigned char xcbc[64]; - - int err = -EINVAL; int singlekey = 0; + int err; /* are keylens valid? */ ctx->ctx_valid = false; @@ -569,26 +566,14 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, goto skipover; } - if (!RTA_OK(rta, keylen) || - rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM || - RTA_PAYLOAD(rta) < sizeof(*param)) - return -EINVAL; + err = crypto_authenc_extractkeys(&authenc_keys, key, keylen); + if (err) + return err; - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - - if (keylen < enckeylen) - return -EINVAL; - - authkeylen = keylen - enckeylen; - - /* enckey is at &key[authkeylen] and - * authkey is at &key[0] - */ - authkey = &key[0]; - enckey = &key[authkeylen]; + authkeylen = authenc_keys.authkeylen; + authkey = authenc_keys.authkey; + enckeylen = authenc_keys.enckeylen; + enckey = authenc_keys.enckey; skipover: /* detect RFC3686/4106 and trim from enckeylen(and copy salt..) 
*/ From c32f08d024e275059474b3c11c1fc2bc7f2de990 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 15 Aug 2024 14:20:04 +0300 Subject: [PATCH 46/96] crypto: spacc - Fix uninitialized variable in spacc_aead_process() Smatch complains that: drivers/crypto/dwc-spacc/spacc_aead.c:1031 spacc_aead_process() error: uninitialized symbol 'ptaadsize'. This could happen if, for example, tctx->mode was CRYPTO_MODE_NULL and req->cryptlen was less than icvremove. Fixes: 06af76b46c78 ("crypto: spacc - Add SPAcc aead support") Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_aead.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/crypto/dwc-spacc/spacc_aead.c b/drivers/crypto/dwc-spacc/spacc_aead.c index 3a617da9007d..abf5971e919d 100755 --- a/drivers/crypto/dwc-spacc/spacc_aead.c +++ b/drivers/crypto/dwc-spacc/spacc_aead.c @@ -808,7 +808,7 @@ static int spacc_aead_process(struct aead_request *req, u64 seq, int encrypt) u32 dstoff; int icvremove; int ivaadsize; - int ptaadsize; + int ptaadsize = 0; int iv_to_context; int spacc_proc_len; u32 spacc_icv_offset = 0; @@ -959,8 +959,6 @@ static int spacc_aead_process(struct aead_request *req, u64 seq, int encrypt) tctx->mode == CRYPTO_MODE_NULL) { if (req->cryptlen >= icvremove) ptaadsize = req->cryptlen - icvremove; - } else { - ptaadsize = 0; } /* Calculate and set the below, important parameters From 8bc1bfa02e37d63632f0cb65543e3e71acdccafb Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 15 Aug 2024 14:20:12 +0300 Subject: [PATCH 47/96] crypto: spacc - Fix NULL vs IS_ERR() check in spacc_aead_fallback() The crypto_alloc_aead() function doesn't return NULL pointers, it returns error pointers. Fix the error checking. 
Fixes: 06af76b46c78 ("crypto: spacc - Add SPAcc aead support") Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_aead.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/dwc-spacc/spacc_aead.c b/drivers/crypto/dwc-spacc/spacc_aead.c index abf5971e919d..7f6c48881eab 100755 --- a/drivers/crypto/dwc-spacc/spacc_aead.c +++ b/drivers/crypto/dwc-spacc/spacc_aead.c @@ -769,9 +769,9 @@ static int spacc_aead_fallback(struct aead_request *req, ctx->fb.aead = crypto_alloc_aead(aead_name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); - if (!ctx->fb.aead) { + if (IS_ERR(ctx->fb.aead)) { pr_err("Spacc aead fallback tfm is NULL!\n"); - return -EINVAL; + return PTR_ERR(ctx->fb.aead); } subreq = aead_request_alloc(ctx->fb.aead, GFP_KERNEL); From 3b1c9df662915a18a86f1a88364ee70875ed3b44 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 15 Aug 2024 14:20:20 +0300 Subject: [PATCH 48/96] crypto: spacc - Check for allocation failure in spacc_skcipher_fallback() Check for crypto_alloc_skcipher() failure. 
Fixes: c8981d9230d8 ("crypto: spacc - Add SPAcc Skcipher support") Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_skcipher.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/dwc-spacc/spacc_skcipher.c b/drivers/crypto/dwc-spacc/spacc_skcipher.c index 488c03ff6c36..8c698b75dd92 100644 --- a/drivers/crypto/dwc-spacc/spacc_skcipher.c +++ b/drivers/crypto/dwc-spacc/spacc_skcipher.c @@ -67,6 +67,8 @@ static int spacc_skcipher_fallback(unsigned char *name, tctx->fb.cipher = crypto_alloc_skcipher(name, CRYPTO_ALG_TYPE_SKCIPHER, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(tctx->fb.cipher)) + return PTR_ERR(tctx->fb.cipher); crypto_skcipher_set_reqsize(reqtfm, sizeof(struct spacc_crypto_reqctx) + From 694a6f594817462942acbb1a35b1f7d61e2d49e7 Mon Sep 17 00:00:00 2001 From: Svyatoslav Pankratov Date: Thu, 15 Aug 2024 16:47:23 +0100 Subject: [PATCH 49/96] crypto: qat - fix "Full Going True" macro definition The macro `ADF_RP_INT_SRC_SEL_F_RISE_MASK` is currently set to the value `0100b` which means "Empty Going False". This might cause an incorrect restore of the bank state during live migration. Fix the definition of the macro to properly represent the "Full Going True" state which is encoded as `0011b`. 
Fixes: bbfdde7d195f ("crypto: qat - add bank save and restore flows") Signed-off-by: Svyatoslav Pankratov Reviewed-by: Xin Zeng Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 8b10926cedba..e8c53bd76f1b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -83,7 +83,7 @@ #define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) /* Ring interrupt */ -#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2) +#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0) #define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0) #define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4 #define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC) From 58bf99100a6dfcc53ba4ab547f1394bb6873b2ac Mon Sep 17 00:00:00 2001 From: Pavitrakumar M Date: Fri, 16 Aug 2024 17:33:32 +0530 Subject: [PATCH 50/96] crypto: spacc - Fix counter width checks This patch fixes counter width checks according to the version extension3 register. The counter widths can be 8, 16, 32 and 64 bits as per the extension3 register. 
Signed-off-by: Bhoomika K Signed-off-by: Pavitrakumar M Acked-by: Ruud Derwig Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_skcipher.c | 45 ++++++++++++----------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/drivers/crypto/dwc-spacc/spacc_skcipher.c b/drivers/crypto/dwc-spacc/spacc_skcipher.c index 8c698b75dd92..1ef7c665188f 100644 --- a/drivers/crypto/dwc-spacc/spacc_skcipher.c +++ b/drivers/crypto/dwc-spacc/spacc_skcipher.c @@ -408,24 +408,8 @@ static int spacc_cipher_process(struct skcipher_request *req, int enc_dec) for (i = 0; i < 16; i++) ivc1[i] = req->iv[i]; - /* 32-bit counter width */ - if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) & (0x2)) { - - for (i = 12; i < 16; i++) { - num_iv <<= 8; - num_iv |= ivc1[i]; - } - - diff = SPACC_CTR_IV_MAX32 - num_iv; - - if (len > diff) { - name = salg->calg->cra_name; - ret = spacc_skcipher_fallback(name, - req, enc_dec); - return ret; - } - } else if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) - & (0x3)) { /* 64-bit counter width */ + /* 64-bit counter width */ + if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) & (0x3)) { for (i = 8; i < 16; i++) { num_iv64 <<= 8; @@ -440,8 +424,26 @@ static int spacc_cipher_process(struct skcipher_request *req, int enc_dec) req, enc_dec); return ret; } + /* 32-bit counter width */ } else if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) - & (0x1)) { /* 16-bit counter width */ + & (0x2)) { + + for (i = 12; i < 16; i++) { + num_iv <<= 8; + num_iv |= ivc1[i]; + } + + diff = SPACC_CTR_IV_MAX32 - num_iv; + + if (len > diff) { + name = salg->calg->cra_name; + ret = spacc_skcipher_fallback(name, + req, enc_dec); + return ret; + } + /* 16-bit counter width */ + } else if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) + & (0x1)) { for (i = 14; i < 16; i++) { num_iv <<= 8; @@ -456,8 +458,9 @@ static int spacc_cipher_process(struct skcipher_request *req, int enc_dec) req, enc_dec); return ret; } - } else if (readl(device_h->regmap + 
SPACC_REG_VERSION_EXT_3) - & (0x0)) { /* 8-bit counter width */ + /* 8-bit counter width */ + } else if ((readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) + & 0x7) == 0) { for (i = 15; i < 16; i++) { num_iv <<= 8; From 87a3fcf5fec5fb59ec8f23d12a56bcf2b2ee6db7 Mon Sep 17 00:00:00 2001 From: Pavitrakumar M Date: Fri, 16 Aug 2024 17:33:33 +0530 Subject: [PATCH 51/96] crypto: spacc - Fixed return to CRYPTO_OK Removed CRYPTO_USED_JB and returning CRYPTO_OK instead. Signed-off-by: Bhoomika K Signed-off-by: Pavitrakumar M Acked-by: Ruud Derwig Signed-off-by: Herbert Xu --- drivers/crypto/dwc-spacc/spacc_core.c | 6 +++--- drivers/crypto/dwc-spacc/spacc_core.h | 2 -- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/dwc-spacc/spacc_core.c b/drivers/crypto/dwc-spacc/spacc_core.c index b9e0d3227f81..1da7cdd93e78 100644 --- a/drivers/crypto/dwc-spacc/spacc_core.c +++ b/drivers/crypto/dwc-spacc/spacc_core.c @@ -1103,7 +1103,7 @@ int spacc_packet_enqueue_ddt_ex(struct spacc_device *spacc, int use_jb, { int i; struct spacc_job *job; - int ret = CRYPTO_OK, proc_len; + int proc_len; if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) return -ENXIO; @@ -1222,7 +1222,7 @@ int spacc_packet_enqueue_ddt_ex(struct spacc_device *spacc, int use_jb, job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_KEY_EXP); } - return ret; + return CRYPTO_OK; fifo_full: /* try to add a job to the job buffers*/ @@ -1248,7 +1248,7 @@ fifo_full: spacc->jb_head = i; - return CRYPTO_USED_JB; + return CRYPTO_OK; } int spacc_packet_enqueue_ddt(struct spacc_device *spacc, int job_idx, diff --git a/drivers/crypto/dwc-spacc/spacc_core.h b/drivers/crypto/dwc-spacc/spacc_core.h index 399b7c976151..297a08eea0d2 100644 --- a/drivers/crypto/dwc-spacc/spacc_core.h +++ b/drivers/crypto/dwc-spacc/spacc_core.h @@ -333,8 +333,6 @@ enum { #define SPACC_MAX_JOB_BUFFERS 192 #endif -#define CRYPTO_USED_JB 256 - /* max DDT particle size */ #ifndef SPACC_MAX_PARTICLE_SIZE #define SPACC_MAX_PARTICLE_SIZE 4096 From 
311eea7e37c4c0b44b557d0c100860a03b4eab65 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 17 Aug 2024 12:13:23 +0800 Subject: [PATCH 52/96] crypto: octeontx - Fix authenc setkey Use the generic crypto_authenc_extractkeys helper instead of custom parsing code that is slightly broken. Also fix a number of memory leaks by moving memory allocation from setkey to init_tfm (setkey can be called multiple times over the life of a tfm). Finally accept all hash key lengths by running the digest over extra-long keys. Signed-off-by: Herbert Xu --- .../crypto/marvell/octeontx/otx_cptvf_algs.c | 265 +++++++----------- 1 file changed, 95 insertions(+), 170 deletions(-) diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index e53c79fe6342..096be42e9d03 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include "otx_cptvf.h" @@ -66,6 +65,8 @@ static struct cpt_device_table ae_devices = { .count = ATOMIC_INIT(0) }; +static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg); + static inline int get_se_device(struct pci_dev **pdev, int *cpu_num) { int count, ret = 0; @@ -509,44 +510,61 @@ static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type) ctx->cipher_type = cipher_type; ctx->mac_type = mac_type; + switch (ctx->mac_type) { + case OTX_CPT_SHA1: + ctx->hashalg = crypto_alloc_shash("sha1", 0, 0); + break; + + case OTX_CPT_SHA256: + ctx->hashalg = crypto_alloc_shash("sha256", 0, 0); + break; + + case OTX_CPT_SHA384: + ctx->hashalg = crypto_alloc_shash("sha384", 0, 0); + break; + + case OTX_CPT_SHA512: + ctx->hashalg = crypto_alloc_shash("sha512", 0, 0); + break; + } + + if (IS_ERR(ctx->hashalg)) + return PTR_ERR(ctx->hashalg); + + crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx)); + + if (!ctx->hashalg) + return 0; + /* * When selected 
cipher is NULL we use HMAC opcode instead of * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms * for calculating ipad and opad */ if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) { - switch (ctx->mac_type) { - case OTX_CPT_SHA1: - ctx->hashalg = crypto_alloc_shash("sha1", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + int ss = crypto_shash_statesize(ctx->hashalg); - case OTX_CPT_SHA256: - ctx->hashalg = crypto_alloc_shash("sha256", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + ctx->ipad = kzalloc(ss, GFP_KERNEL); + if (!ctx->ipad) { + crypto_free_shash(ctx->hashalg); + return -ENOMEM; + } - case OTX_CPT_SHA384: - ctx->hashalg = crypto_alloc_shash("sha384", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; - - case OTX_CPT_SHA512: - ctx->hashalg = crypto_alloc_shash("sha512", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + ctx->opad = kzalloc(ss, GFP_KERNEL); + if (!ctx->opad) { + kfree(ctx->ipad); + crypto_free_shash(ctx->hashalg); + return -ENOMEM; } } - crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx)); + ctx->sdesc = alloc_sdesc(ctx->hashalg); + if (!ctx->sdesc) { + kfree(ctx->opad); + kfree(ctx->ipad); + crypto_free_shash(ctx->hashalg); + return -ENOMEM; + } return 0; } @@ -602,8 +620,7 @@ static void otx_cpt_aead_exit(struct crypto_aead *tfm) kfree(ctx->ipad); kfree(ctx->opad); - if (ctx->hashalg) - crypto_free_shash(ctx->hashalg); + crypto_free_shash(ctx->hashalg); kfree(ctx->sdesc); } @@ -699,7 +716,7 @@ static inline void swap_data64(void *buf, u32 len) *dst = cpu_to_be64p(src); } -static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) +static int swap_pad(u8 mac_type, u8 *pad) { struct sha512_state *sha512; struct sha256_state *sha256; @@ -707,22 +724,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) switch (mac_type) { case 
OTX_CPT_SHA1: - sha1 = (struct sha1_state *) in_pad; + sha1 = (struct sha1_state *)pad; swap_data32(sha1->state, SHA1_DIGEST_SIZE); - memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE); break; case OTX_CPT_SHA256: - sha256 = (struct sha256_state *) in_pad; + sha256 = (struct sha256_state *)pad; swap_data32(sha256->state, SHA256_DIGEST_SIZE); - memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE); break; case OTX_CPT_SHA384: case OTX_CPT_SHA512: - sha512 = (struct sha512_state *) in_pad; + sha512 = (struct sha512_state *)pad; swap_data64(sha512->state, SHA512_DIGEST_SIZE); - memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE); break; default: @@ -732,55 +746,53 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) return 0; } -static int aead_hmac_init(struct crypto_aead *cipher) +static int aead_hmac_init(struct crypto_aead *cipher, + struct crypto_authenc_keys *keys) { struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - int state_size = crypto_shash_statesize(ctx->hashalg); int ds = crypto_shash_digestsize(ctx->hashalg); int bs = crypto_shash_blocksize(ctx->hashalg); - int authkeylen = ctx->auth_key_len; + int authkeylen = keys->authkeylen; u8 *ipad = NULL, *opad = NULL; - int ret = 0, icount = 0; - - ctx->sdesc = alloc_sdesc(ctx->hashalg); - if (!ctx->sdesc) - return -ENOMEM; - - ctx->ipad = kzalloc(bs, GFP_KERNEL); - if (!ctx->ipad) { - ret = -ENOMEM; - goto calc_fail; - } - - ctx->opad = kzalloc(bs, GFP_KERNEL); - if (!ctx->opad) { - ret = -ENOMEM; - goto calc_fail; - } - - ipad = kzalloc(state_size, GFP_KERNEL); - if (!ipad) { - ret = -ENOMEM; - goto calc_fail; - } - - opad = kzalloc(state_size, GFP_KERNEL); - if (!opad) { - ret = -ENOMEM; - goto calc_fail; - } + int icount = 0; + int ret; if (authkeylen > bs) { - ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key, - authkeylen, ipad); + ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey, + authkeylen, ctx->key); if (ret) - goto calc_fail; - + return ret; authkeylen = ds; - } else { - 
memcpy(ipad, ctx->key, authkeylen); + } else + memcpy(ctx->key, keys->authkey, authkeylen); + + ctx->enc_key_len = keys->enckeylen; + ctx->auth_key_len = authkeylen; + + if (ctx->cipher_type == OTX_CPT_CIPHER_NULL) + return keys->enckeylen ? -EINVAL : 0; + + switch (keys->enckeylen) { + case AES_KEYSIZE_128: + ctx->key_type = OTX_CPT_AES_128_BIT; + break; + case AES_KEYSIZE_192: + ctx->key_type = OTX_CPT_AES_192_BIT; + break; + case AES_KEYSIZE_256: + ctx->key_type = OTX_CPT_AES_256_BIT; + break; + default: + /* Invalid key length */ + return -EINVAL; } + memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen); + + ipad = ctx->ipad; + opad = ctx->opad; + + memcpy(ipad, ctx->key, authkeylen); memset(ipad + authkeylen, 0, bs - authkeylen); memcpy(opad, ipad, bs); @@ -798,7 +810,7 @@ static int aead_hmac_init(struct crypto_aead *cipher) crypto_shash_init(&ctx->sdesc->shash); crypto_shash_update(&ctx->sdesc->shash, ipad, bs); crypto_shash_export(&ctx->sdesc->shash, ipad); - ret = copy_pad(ctx->mac_type, ctx->ipad, ipad); + ret = swap_pad(ctx->mac_type, ipad); if (ret) goto calc_fail; @@ -806,25 +818,9 @@ static int aead_hmac_init(struct crypto_aead *cipher) crypto_shash_init(&ctx->sdesc->shash); crypto_shash_update(&ctx->sdesc->shash, opad, bs); crypto_shash_export(&ctx->sdesc->shash, opad); - ret = copy_pad(ctx->mac_type, ctx->opad, opad); - if (ret) - goto calc_fail; - - kfree(ipad); - kfree(opad); - - return 0; + ret = swap_pad(ctx->mac_type, opad); calc_fail: - kfree(ctx->ipad); - ctx->ipad = NULL; - kfree(ctx->opad); - ctx->opad = NULL; - kfree(ipad); - kfree(opad); - kfree(ctx->sdesc); - ctx->sdesc = NULL; - return ret; } @@ -832,57 +828,15 @@ static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - struct crypto_authenc_key_param *param; - int enckeylen = 0, authkeylen = 0; - struct rtattr *rta = (void *)key; - int status = -EINVAL; 
+ struct crypto_authenc_keys authenc_keys; + int status; - if (!RTA_OK(rta, keylen)) - goto badkey; - - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < enckeylen) - goto badkey; - - if (keylen > OTX_CPT_MAX_KEY_SIZE) - goto badkey; - - authkeylen = keylen - enckeylen; - memcpy(ctx->key, key, keylen); - - switch (enckeylen) { - case AES_KEYSIZE_128: - ctx->key_type = OTX_CPT_AES_128_BIT; - break; - case AES_KEYSIZE_192: - ctx->key_type = OTX_CPT_AES_192_BIT; - break; - case AES_KEYSIZE_256: - ctx->key_type = OTX_CPT_AES_256_BIT; - break; - default: - /* Invalid key length */ - goto badkey; - } - - ctx->enc_key_len = enckeylen; - ctx->auth_key_len = authkeylen; - - status = aead_hmac_init(cipher); + status = crypto_authenc_extractkeys(&authenc_keys, key, keylen); if (status) goto badkey; - return 0; + status = aead_hmac_init(cipher, &authenc_keys); + badkey: return status; } @@ -891,36 +845,7 @@ static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - struct crypto_authenc_key_param *param; - struct rtattr *rta = (void *)key; - int enckeylen = 0; - - if (!RTA_OK(rta, keylen)) - goto badkey; - - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (enckeylen != 0) - goto badkey; - - if (keylen > OTX_CPT_MAX_KEY_SIZE) - goto badkey; - - memcpy(ctx->key, key, keylen); - ctx->enc_key_len = enckeylen; - ctx->auth_key_len = keylen; - return 0; -badkey: - return -EINVAL; + return 
otx_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen); } static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher, From 7ccb750dcac8abbfc7743aab0db6a72c1c3703c7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 17 Aug 2024 12:36:19 +0800 Subject: [PATCH 53/96] crypto: octeontx2 - Fix authenc setkey Use the generic crypto_authenc_extractkeys helper instead of custom parsing code that is slightly broken. Also fix a number of memory leaks by moving memory allocation from setkey to init_tfm (setkey can be called multiple times over the life of a tfm). Finally accept all hash key lengths by running the digest over extra-long keys. Signed-off-by: Herbert Xu --- .../marvell/octeontx2/otx2_cptvf_algs.c | 256 +++++++----------- 1 file changed, 91 insertions(+), 165 deletions(-) diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c index ff7cc8c13e73..7eb0bc13994d 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include "otx2_cptvf.h" @@ -55,6 +54,8 @@ static struct cpt_device_table se_devices = { .count = ATOMIC_INIT(0) }; +static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg); + static inline int get_se_device(struct pci_dev **pdev, int *cpu_num) { int count; @@ -598,40 +599,56 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type) ctx->cipher_type = cipher_type; ctx->mac_type = mac_type; + switch (ctx->mac_type) { + case OTX2_CPT_SHA1: + ctx->hashalg = crypto_alloc_shash("sha1", 0, 0); + break; + + case OTX2_CPT_SHA256: + ctx->hashalg = crypto_alloc_shash("sha256", 0, 0); + break; + + case OTX2_CPT_SHA384: + ctx->hashalg = crypto_alloc_shash("sha384", 0, 0); + break; + + case OTX2_CPT_SHA512: + ctx->hashalg = crypto_alloc_shash("sha512", 0, 0); + break; + } + + if (IS_ERR(ctx->hashalg)) + return 
PTR_ERR(ctx->hashalg); + + if (ctx->hashalg) { + ctx->sdesc = alloc_sdesc(ctx->hashalg); + if (!ctx->sdesc) { + crypto_free_shash(ctx->hashalg); + return -ENOMEM; + } + } + /* * When selected cipher is NULL we use HMAC opcode instead of * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms * for calculating ipad and opad */ - if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) { - switch (ctx->mac_type) { - case OTX2_CPT_SHA1: - ctx->hashalg = crypto_alloc_shash("sha1", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) { + int ss = crypto_shash_statesize(ctx->hashalg); - case OTX2_CPT_SHA256: - ctx->hashalg = crypto_alloc_shash("sha256", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + ctx->ipad = kzalloc(ss, GFP_KERNEL); + if (!ctx->ipad) { + kfree(ctx->sdesc); + crypto_free_shash(ctx->hashalg); + return -ENOMEM; + } - case OTX2_CPT_SHA384: - ctx->hashalg = crypto_alloc_shash("sha384", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; - - case OTX2_CPT_SHA512: - ctx->hashalg = crypto_alloc_shash("sha512", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + ctx->opad = kzalloc(ss, GFP_KERNEL); + if (!ctx->opad) { + kfree(ctx->ipad); + kfree(ctx->sdesc); + crypto_free_shash(ctx->hashalg); + return -ENOMEM; } } switch (ctx->cipher_type) { @@ -713,8 +730,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm) kfree(ctx->ipad); kfree(ctx->opad); - if (ctx->hashalg) - crypto_free_shash(ctx->hashalg); + crypto_free_shash(ctx->hashalg); kfree(ctx->sdesc); if (ctx->fbk_cipher) { @@ -788,7 +804,7 @@ static inline void swap_data64(void *buf, u32 len) cpu_to_be64s(src); } -static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) +static int swap_pad(u8 mac_type, u8 *pad) { struct sha512_state *sha512; struct sha256_state 
*sha256; @@ -796,22 +812,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) switch (mac_type) { case OTX2_CPT_SHA1: - sha1 = (struct sha1_state *) in_pad; + sha1 = (struct sha1_state *)pad; swap_data32(sha1->state, SHA1_DIGEST_SIZE); - memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE); break; case OTX2_CPT_SHA256: - sha256 = (struct sha256_state *) in_pad; + sha256 = (struct sha256_state *)pad; swap_data32(sha256->state, SHA256_DIGEST_SIZE); - memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE); break; case OTX2_CPT_SHA384: case OTX2_CPT_SHA512: - sha512 = (struct sha512_state *) in_pad; + sha512 = (struct sha512_state *)pad; swap_data64(sha512->state, SHA512_DIGEST_SIZE); - memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE); break; default: @@ -821,55 +834,54 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) return 0; } -static int aead_hmac_init(struct crypto_aead *cipher) +static int aead_hmac_init(struct crypto_aead *cipher, + struct crypto_authenc_keys *keys) { struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - int state_size = crypto_shash_statesize(ctx->hashalg); int ds = crypto_shash_digestsize(ctx->hashalg); int bs = crypto_shash_blocksize(ctx->hashalg); - int authkeylen = ctx->auth_key_len; + int authkeylen = keys->authkeylen; u8 *ipad = NULL, *opad = NULL; - int ret = 0, icount = 0; - - ctx->sdesc = alloc_sdesc(ctx->hashalg); - if (!ctx->sdesc) - return -ENOMEM; - - ctx->ipad = kzalloc(bs, GFP_KERNEL); - if (!ctx->ipad) { - ret = -ENOMEM; - goto calc_fail; - } - - ctx->opad = kzalloc(bs, GFP_KERNEL); - if (!ctx->opad) { - ret = -ENOMEM; - goto calc_fail; - } - - ipad = kzalloc(state_size, GFP_KERNEL); - if (!ipad) { - ret = -ENOMEM; - goto calc_fail; - } - - opad = kzalloc(state_size, GFP_KERNEL); - if (!opad) { - ret = -ENOMEM; - goto calc_fail; - } + int icount = 0; + int ret; if (authkeylen > bs) { - ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key, - authkeylen, ipad); + ret = 
crypto_shash_digest(&ctx->sdesc->shash, keys->authkey, + authkeylen, ctx->key); if (ret) goto calc_fail; authkeylen = ds; - } else { - memcpy(ipad, ctx->key, authkeylen); + } else + memcpy(ctx->key, keys->authkey, authkeylen); + + ctx->enc_key_len = keys->enckeylen; + ctx->auth_key_len = authkeylen; + + if (ctx->cipher_type == OTX2_CPT_CIPHER_NULL) + return keys->enckeylen ? -EINVAL : 0; + + switch (keys->enckeylen) { + case AES_KEYSIZE_128: + ctx->key_type = OTX2_CPT_AES_128_BIT; + break; + case AES_KEYSIZE_192: + ctx->key_type = OTX2_CPT_AES_192_BIT; + break; + case AES_KEYSIZE_256: + ctx->key_type = OTX2_CPT_AES_256_BIT; + break; + default: + /* Invalid key length */ + return -EINVAL; } + memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen); + + ipad = ctx->ipad; + opad = ctx->opad; + + memcpy(ipad, ctx->key, authkeylen); memset(ipad + authkeylen, 0, bs - authkeylen); memcpy(opad, ipad, bs); @@ -887,7 +899,7 @@ static int aead_hmac_init(struct crypto_aead *cipher) crypto_shash_init(&ctx->sdesc->shash); crypto_shash_update(&ctx->sdesc->shash, ipad, bs); crypto_shash_export(&ctx->sdesc->shash, ipad); - ret = copy_pad(ctx->mac_type, ctx->ipad, ipad); + ret = swap_pad(ctx->mac_type, ipad); if (ret) goto calc_fail; @@ -895,25 +907,9 @@ static int aead_hmac_init(struct crypto_aead *cipher) crypto_shash_init(&ctx->sdesc->shash); crypto_shash_update(&ctx->sdesc->shash, opad, bs); crypto_shash_export(&ctx->sdesc->shash, opad); - ret = copy_pad(ctx->mac_type, ctx->opad, opad); - if (ret) - goto calc_fail; - - kfree(ipad); - kfree(opad); - - return 0; + ret = swap_pad(ctx->mac_type, opad); calc_fail: - kfree(ctx->ipad); - ctx->ipad = NULL; - kfree(ctx->opad); - ctx->opad = NULL; - kfree(ipad); - kfree(opad); - kfree(ctx->sdesc); - ctx->sdesc = NULL; - return ret; } @@ -921,87 +917,17 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx2_cpt_aead_ctx *ctx = 
crypto_aead_ctx_dma(cipher); - struct crypto_authenc_key_param *param; - int enckeylen = 0, authkeylen = 0; - struct rtattr *rta = (void *)key; + struct crypto_authenc_keys authenc_keys; - if (!RTA_OK(rta, keylen)) - return -EINVAL; - - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - return -EINVAL; - - if (RTA_PAYLOAD(rta) < sizeof(*param)) - return -EINVAL; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < enckeylen) - return -EINVAL; - - if (keylen > OTX2_CPT_MAX_KEY_SIZE) - return -EINVAL; - - authkeylen = keylen - enckeylen; - memcpy(ctx->key, key, keylen); - - switch (enckeylen) { - case AES_KEYSIZE_128: - ctx->key_type = OTX2_CPT_AES_128_BIT; - break; - case AES_KEYSIZE_192: - ctx->key_type = OTX2_CPT_AES_192_BIT; - break; - case AES_KEYSIZE_256: - ctx->key_type = OTX2_CPT_AES_256_BIT; - break; - default: - /* Invalid key length */ - return -EINVAL; - } - - ctx->enc_key_len = enckeylen; - ctx->auth_key_len = authkeylen; - - return aead_hmac_init(cipher); + return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?: + aead_hmac_init(cipher, &authenc_keys); } static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - struct crypto_authenc_key_param *param; - struct rtattr *rta = (void *)key; - int enckeylen = 0; - - if (!RTA_OK(rta, keylen)) - return -EINVAL; - - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - return -EINVAL; - - if (RTA_PAYLOAD(rta) < sizeof(*param)) - return -EINVAL; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (enckeylen != 0) - return -EINVAL; - - if (keylen > OTX2_CPT_MAX_KEY_SIZE) - return -EINVAL; - - memcpy(ctx->key, key, keylen); - ctx->enc_key_len = enckeylen; - ctx->auth_key_len = keylen; - 
- return 0; + return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen); } static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher, From 96ad595520591f8bd9c5fbe901b56561fa9c8a9e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 17 Aug 2024 14:56:51 +0800 Subject: [PATCH 54/96] crypto: api - Remove instance larval fulfilment In order to allow testing to complete asynchronously after the registration process, instance larvals need to complete prior to having a test result. Support this by redoing the lookup for instance larvals after completion. This should locate the pending test larval and then repeat the wait on that (if it is still pending). As the lookup is now repeated there is no longer any need to compute the fulfilment status and all that code can be removed. Signed-off-by: Herbert Xu --- crypto/algapi.c | 48 +++--------------------------------------------- crypto/algboss.c | 1 + crypto/api.c | 23 +++++++++++++++++++---- 3 files changed, 23 insertions(+), 49 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 122cd910c4e1..d2ccc1289f92 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -235,7 +235,6 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, EXPORT_SYMBOL_GPL(crypto_remove_spawns); static void crypto_alg_finish_registration(struct crypto_alg *alg, - bool fulfill_requests, struct list_head *algs_to_put) { struct crypto_alg *q; @@ -247,30 +246,8 @@ static void crypto_alg_finish_registration(struct crypto_alg *alg, if (crypto_is_moribund(q)) continue; - if (crypto_is_larval(q)) { - struct crypto_larval *larval = (void *)q; - - /* - * Check to see if either our generic name or - * specific name can satisfy the name requested - * by the larval entry q. 
- */ - if (strcmp(alg->cra_name, q->cra_name) && - strcmp(alg->cra_driver_name, q->cra_name)) - continue; - - if (larval->adult) - continue; - if ((q->cra_flags ^ alg->cra_flags) & larval->mask) - continue; - - if (fulfill_requests && crypto_mod_get(alg)) - larval->adult = alg; - else - larval->adult = ERR_PTR(-EAGAIN); - + if (crypto_is_larval(q)) continue; - } if (strcmp(alg->cra_name, q->cra_name)) continue; @@ -359,7 +336,7 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put) list_add(&larval->alg.cra_list, &crypto_alg_list); } else { alg->cra_flags |= CRYPTO_ALG_TESTED; - crypto_alg_finish_registration(alg, true, algs_to_put); + crypto_alg_finish_registration(alg, algs_to_put); } out: @@ -376,7 +353,6 @@ void crypto_alg_tested(const char *name, int err) struct crypto_alg *alg; struct crypto_alg *q; LIST_HEAD(list); - bool best; down_write(&crypto_alg_sem); list_for_each_entry(q, &crypto_alg_list, cra_list) { @@ -408,25 +384,7 @@ found: alg->cra_flags |= CRYPTO_ALG_TESTED; - /* - * If a higher-priority implementation of the same algorithm is - * currently being tested, then don't fulfill request larvals. 
- */ - best = true; - list_for_each_entry(q, &crypto_alg_list, cra_list) { - if (crypto_is_moribund(q) || !crypto_is_larval(q)) - continue; - - if (strcmp(alg->cra_name, q->cra_name)) - continue; - - if (q->cra_priority > alg->cra_priority) { - best = false; - break; - } - } - - crypto_alg_finish_registration(alg, best, &list); + crypto_alg_finish_registration(alg, &list); complete: complete_all(&test->completion); diff --git a/crypto/algboss.c b/crypto/algboss.c index 1aa5f306998a..d05a5aad2176 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -64,6 +64,7 @@ static int cryptomgr_probe(void *data) crypto_tmpl_put(tmpl); out: + param->larval->alg.cra_flags |= CRYPTO_ALG_DEAD; complete_all(¶m->larval->completion); crypto_alg_put(¶m->larval->alg); kfree(param); diff --git a/crypto/api.c b/crypto/api.c index 22556907b3bc..ffb81aa32725 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -37,6 +37,8 @@ DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished); #endif static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); +static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, + u32 mask); struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) { @@ -201,9 +203,12 @@ static void crypto_start_test(struct crypto_larval *larval) static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) { - struct crypto_larval *larval = (void *)alg; + struct crypto_larval *larval; long time_left; +again: + larval = container_of(alg, struct crypto_larval, alg); + if (!crypto_boot_test_finished()) crypto_start_test(larval); @@ -215,9 +220,16 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) alg = ERR_PTR(-EINTR); else if (!time_left) alg = ERR_PTR(-ETIMEDOUT); - else if (!alg) - alg = ERR_PTR(-ENOENT); - else if (IS_ERR(alg)) + else if (!alg) { + u32 type; + u32 mask; + + alg = &larval->alg; + type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); + mask = larval->mask; + alg = crypto_alg_lookup(alg->cra_name, type, mask) 
?: + ERR_PTR(-ENOENT); + } else if (IS_ERR(alg)) ; else if (crypto_is_test_larval(larval) && !(alg->cra_flags & CRYPTO_ALG_TESTED)) @@ -228,6 +240,9 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) alg = ERR_PTR(-EAGAIN); crypto_mod_put(&larval->alg); + if (!IS_ERR(alg) && crypto_is_larval(alg)) + goto again; + return alg; } From 37da5d0ffa7b61f79156fbbd3369f17b9a1638bd Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 17 Aug 2024 14:57:40 +0800 Subject: [PATCH 55/96] crypto: api - Do not wait for tests during registration As registration is usually carried out during module init, this is a context where as little work as possible should be carried out. Testing may trigger module loads of underlying components, which could even lead back to the module that is registering at the moment. This may lead to dead-locks outside of the Crypto API. Avoid this by not waiting for the tests to complete. They will be scheduled but completion will be asynchronous. Any users will still wait for completion. 
Reported-by: Russell King Signed-off-by: Herbert Xu --- crypto/algapi.c | 23 ++++++++++++----------- crypto/api.c | 41 +++++++++++++++++++++-------------------- crypto/internal.h | 3 +-- 3 files changed, 34 insertions(+), 33 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index d2ccc1289f92..74e2261c184c 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -366,7 +366,8 @@ void crypto_alg_tested(const char *name, int err) } pr_err("alg: Unexpected test result for %s: %d\n", name, err); - goto unlock; + up_write(&crypto_alg_sem); + return; found: q->cra_flags |= CRYPTO_ALG_DEAD; @@ -387,11 +388,12 @@ found: crypto_alg_finish_registration(alg, &list); complete: + list_del_init(&test->alg.cra_list); complete_all(&test->completion); -unlock: up_write(&crypto_alg_sem); + crypto_alg_put(&test->alg); crypto_remove_final(&list); } EXPORT_SYMBOL_GPL(crypto_alg_tested); @@ -412,7 +414,6 @@ int crypto_register_alg(struct crypto_alg *alg) { struct crypto_larval *larval; LIST_HEAD(algs_to_put); - bool test_started = false; int err; alg->cra_flags &= ~CRYPTO_ALG_DEAD; @@ -423,15 +424,16 @@ int crypto_register_alg(struct crypto_alg *alg) down_write(&crypto_alg_sem); larval = __crypto_register_alg(alg, &algs_to_put); if (!IS_ERR_OR_NULL(larval)) { - test_started = crypto_boot_test_finished(); + bool test_started = crypto_boot_test_finished(); + larval->test_started = test_started; + if (test_started) + crypto_schedule_test(larval); } up_write(&crypto_alg_sem); if (IS_ERR(larval)) return PTR_ERR(larval); - if (test_started) - crypto_wait_for_test(larval); crypto_remove_final(&algs_to_put); return 0; } @@ -646,8 +648,10 @@ int crypto_register_instance(struct crypto_template *tmpl, larval = __crypto_register_alg(&inst->alg, &algs_to_put); if (IS_ERR(larval)) goto unlock; - else if (larval) + else if (larval) { larval->test_started = true; + crypto_schedule_test(larval); + } hlist_add_head(&inst->list, &tmpl->instances); inst->tmpl = tmpl; @@ -657,8 +661,6 @@ unlock: if 
(IS_ERR(larval)) return PTR_ERR(larval); - if (larval) - crypto_wait_for_test(larval); crypto_remove_final(&algs_to_put); return 0; } @@ -1042,6 +1044,7 @@ static void __init crypto_start_tests(void) l->test_started = true; larval = l; + crypto_schedule_test(larval); break; } @@ -1049,8 +1052,6 @@ static void __init crypto_start_tests(void) if (!larval) break; - - crypto_wait_for_test(larval); } set_crypto_boot_test_finished(); diff --git a/crypto/api.c b/crypto/api.c index ffb81aa32725..bbe29d438815 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -154,32 +154,31 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, return alg; } -void crypto_larval_kill(struct crypto_alg *alg) +static void crypto_larval_kill(struct crypto_larval *larval) { - struct crypto_larval *larval = (void *)alg; + bool unlinked; down_write(&crypto_alg_sem); - list_del(&alg->cra_list); + unlinked = list_empty(&larval->alg.cra_list); + if (!unlinked) + list_del_init(&larval->alg.cra_list); up_write(&crypto_alg_sem); - complete_all(&larval->completion); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_larval_kill); -void crypto_wait_for_test(struct crypto_larval *larval) + if (unlinked) + return; + + complete_all(&larval->completion); + crypto_alg_put(&larval->alg); +} + +void crypto_schedule_test(struct crypto_larval *larval) { int err; err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult); - if (WARN_ON_ONCE(err != NOTIFY_STOP)) - goto out; - - err = wait_for_completion_killable(&larval->completion); - WARN_ON(err); -out: - crypto_larval_kill(&larval->alg); + WARN_ON_ONCE(err != NOTIFY_STOP); } -EXPORT_SYMBOL_GPL(crypto_wait_for_test); +EXPORT_SYMBOL_GPL(crypto_schedule_test); static void crypto_start_test(struct crypto_larval *larval) { @@ -198,7 +197,7 @@ static void crypto_start_test(struct crypto_larval *larval) larval->test_started = true; up_write(&crypto_alg_sem); - crypto_wait_for_test(larval); + crypto_schedule_test(larval); } static struct 
crypto_alg *crypto_larval_wait(struct crypto_alg *alg) @@ -218,9 +217,11 @@ again: alg = larval->adult; if (time_left < 0) alg = ERR_PTR(-EINTR); - else if (!time_left) + else if (!time_left) { + if (crypto_is_test_larval(larval)) + crypto_larval_kill(larval); alg = ERR_PTR(-ETIMEDOUT); - else if (!alg) { + } else if (!alg) { u32 type; u32 mask; @@ -355,7 +356,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) crypto_mod_put(larval); alg = ERR_PTR(-ENOENT); } - crypto_larval_kill(larval); + crypto_larval_kill(container_of(larval, struct crypto_larval, alg)); return alg; } EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); diff --git a/crypto/internal.h b/crypto/internal.h index aee31319be2e..711a6a5bfa2b 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -113,8 +113,7 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); -void crypto_larval_kill(struct crypto_alg *alg); -void crypto_wait_for_test(struct crypto_larval *larval); +void crypto_schedule_test(struct crypto_larval *larval); void crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, From 3c44d31cb34ce4eb8311a2e73634d57702948230 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 17 Aug 2024 14:58:35 +0800 Subject: [PATCH 56/96] crypto: simd - Do not call crypto_alloc_tfm during registration Algorithm registration is usually carried out during module init, where as little work as possible should be carried out. The SIMD code violated this rule by allocating a tfm, this then triggers a full test of the algorithm which may dead-lock in certain cases. SIMD is only allocating the tfm to get at the alg object, which is in fact already available as it is what we are registering. Use that directly and remove the crypto_alloc_tfm call. 
Also remove some obsolete and unused SIMD API. Signed-off-by: Herbert Xu --- arch/arm/crypto/aes-ce-glue.c | 2 +- arch/arm/crypto/aes-neonbs-glue.c | 2 +- crypto/simd.c | 76 ++++++------------------------- include/crypto/internal/simd.h | 12 +---- 4 files changed, 19 insertions(+), 73 deletions(-) diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c index b668c97663ec..f5b66f4cf45d 100644 --- a/arch/arm/crypto/aes-ce-glue.c +++ b/arch/arm/crypto/aes-ce-glue.c @@ -711,7 +711,7 @@ static int __init aes_init(void) algname = aes_algs[i].base.cra_name + 2; drvname = aes_algs[i].base.cra_driver_name + 2; basename = aes_algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algname, drvname, basename); + simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto unregister_simds; diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index fd04f855b2f5..f6be80b5938b 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -491,7 +491,7 @@ static int __init aes_init(void) algname = aes_algs[i].base.cra_name + 2; drvname = aes_algs[i].base.cra_driver_name + 2; basename = aes_algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algname, drvname, basename); + simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto unregister_simds; diff --git a/crypto/simd.c b/crypto/simd.c index 2aa4f72e224f..b07721d1f3f6 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -136,27 +136,19 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm) return 0; } -struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, +struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg, + const char *algname, const char *drvname, const char *basename) { struct simd_skcipher_alg *salg; - struct crypto_skcipher *tfm; - struct 
skcipher_alg *ialg; struct skcipher_alg *alg; int err; - tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - - ialg = crypto_skcipher_alg(tfm); - salg = kzalloc(sizeof(*salg), GFP_KERNEL); if (!salg) { salg = ERR_PTR(-ENOMEM); - goto out_put_tfm; + goto out; } salg->ialg_name = basename; @@ -195,30 +187,16 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, if (err) goto out_free_salg; -out_put_tfm: - crypto_free_skcipher(tfm); +out: return salg; out_free_salg: kfree(salg); salg = ERR_PTR(err); - goto out_put_tfm; + goto out; } EXPORT_SYMBOL_GPL(simd_skcipher_create_compat); -struct simd_skcipher_alg *simd_skcipher_create(const char *algname, - const char *basename) -{ - char drvname[CRYPTO_MAX_ALG_NAME]; - - if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= - CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-ENAMETOOLONG); - - return simd_skcipher_create_compat(algname, drvname, basename); -} -EXPORT_SYMBOL_GPL(simd_skcipher_create); - void simd_skcipher_free(struct simd_skcipher_alg *salg) { crypto_unregister_skcipher(&salg->alg); @@ -246,7 +224,7 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, algname = algs[i].base.cra_name + 2; drvname = algs[i].base.cra_driver_name + 2; basename = algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algname, drvname, basename); + simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto err_unregister; @@ -383,27 +361,19 @@ static int simd_aead_init(struct crypto_aead *tfm) return 0; } -struct simd_aead_alg *simd_aead_create_compat(const char *algname, - const char *drvname, - const char *basename) +static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg, + const char *algname, + const char *drvname, + const char *basename) { struct simd_aead_alg *salg; - struct crypto_aead *tfm; - 
struct aead_alg *ialg; struct aead_alg *alg; int err; - tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - - ialg = crypto_aead_alg(tfm); - salg = kzalloc(sizeof(*salg), GFP_KERNEL); if (!salg) { salg = ERR_PTR(-ENOMEM); - goto out_put_tfm; + goto out; } salg->ialg_name = basename; @@ -442,36 +412,20 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname, if (err) goto out_free_salg; -out_put_tfm: - crypto_free_aead(tfm); +out: return salg; out_free_salg: kfree(salg); salg = ERR_PTR(err); - goto out_put_tfm; + goto out; } -EXPORT_SYMBOL_GPL(simd_aead_create_compat); -struct simd_aead_alg *simd_aead_create(const char *algname, - const char *basename) -{ - char drvname[CRYPTO_MAX_ALG_NAME]; - - if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= - CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-ENAMETOOLONG); - - return simd_aead_create_compat(algname, drvname, basename); -} -EXPORT_SYMBOL_GPL(simd_aead_create); - -void simd_aead_free(struct simd_aead_alg *salg) +static void simd_aead_free(struct simd_aead_alg *salg) { crypto_unregister_aead(&salg->alg); kfree(salg); } -EXPORT_SYMBOL_GPL(simd_aead_free); int simd_register_aeads_compat(struct aead_alg *algs, int count, struct simd_aead_alg **simd_algs) @@ -493,7 +447,7 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count, algname = algs[i].base.cra_name + 2; drvname = algs[i].base.cra_driver_name + 2; basename = algs[i].base.cra_driver_name; - simd = simd_aead_create_compat(algname, drvname, basename); + simd = simd_aead_create_compat(algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto err_unregister; diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h index d2316242a988..be97b97a75dd 100644 --- a/include/crypto/internal/simd.h +++ b/include/crypto/internal/simd.h @@ -14,11 +14,10 @@ struct simd_skcipher_alg; struct skcipher_alg; -struct 
simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, +struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg, + const char *algname, const char *drvname, const char *basename); -struct simd_skcipher_alg *simd_skcipher_create(const char *algname, - const char *basename); void simd_skcipher_free(struct simd_skcipher_alg *alg); int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, @@ -32,13 +31,6 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count, struct simd_aead_alg; struct aead_alg; -struct simd_aead_alg *simd_aead_create_compat(const char *algname, - const char *drvname, - const char *basename); -struct simd_aead_alg *simd_aead_create(const char *algname, - const char *basename); -void simd_aead_free(struct simd_aead_alg *alg); - int simd_register_aeads_compat(struct aead_alg *algs, int count, struct simd_aead_alg **simd_algs); From ce3d2d6b150ba8528f3218ebf0cee2c2c572662d Mon Sep 17 00:00:00 2001 From: Pavan Kumar Paluri Date: Thu, 15 Aug 2024 07:25:00 -0500 Subject: [PATCH 57/96] crypto: ccp - Properly unregister /dev/sev on sev PLATFORM_STATUS failure In case of sev PLATFORM_STATUS failure, sev_get_api_version() fails resulting in sev_data field of psp_master nulled out. This later becomes a problem when unloading the ccp module because the device has not been unregistered (via misc_deregister()) before clearing the sev_data field of psp_master. As a result, on reloading the ccp module, a duplicate device issue is encountered as can be seen from the dmesg log below. on reloading ccp module via modprobe ccp Call Trace: dump_stack_lvl+0xd7/0xf0 dump_stack+0x10/0x20 sysfs_warn_dup+0x5c/0x70 sysfs_create_dir_ns+0xbc/0xd kobject_add_internal+0xb1/0x2f0 kobject_add+0x7a/0xe0 ? srso_alias_return_thunk+0x5/0xfbef5 ? get_device_parent+0xd4/0x1e0 ? __pfx_klist_children_get+0x10/0x10 device_add+0x121/0x870 ? 
srso_alias_return_thunk+0x5/0xfbef5 device_create_groups_vargs+0xdc/0x100 device_create_with_groups+0x3f/0x60 misc_register+0x13b/0x1c0 sev_dev_init+0x1d4/0x290 [ccp] psp_dev_init+0x136/0x300 [ccp] sp_init+0x6f/0x80 [ccp] sp_pci_probe+0x2a6/0x310 [ccp] ? srso_alias_return_thunk+0x5/0xfbef5 local_pci_probe+0x4b/0xb0 work_for_cpu_fn+0x1a/0x30 process_one_work+0x203/0x600 worker_thread+0x19e/0x350 ? __pfx_worker_thread+0x10/0x10 kthread+0xeb/0x120 ? __pfx_kthread+0x10/0x10 ret_from_fork+0x3c/0x60 ? __pfx_kthread+0x10/0x10 ret_from_fork_asm+0x1a/0x30 kobject: kobject_add_internal failed for sev with -EEXIST, don't try to register things with the same name in the same directory. ccp 0000:22:00.1: sev initialization failed ccp 0000:22:00.1: psp initialization failed ccp 0000:a2:00.1: no command queues available ccp 0000:a2:00.1: psp enabled Address this issue by unregistering the /dev/sev before clearing out sev_data in case of PLATFORM_STATUS failure. Fixes: 200664d5237f ("crypto: ccp: Add Secure Encrypted Virtualization (SEV) command support") Cc: stable@vger.kernel.org Signed-off-by: Pavan Kumar Paluri Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 3b89f23afcd7..ff17b651c328 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -2419,6 +2419,8 @@ void sev_pci_init(void) return; err: + sev_dev_destroy(psp_master); + psp_master->sev_data = NULL; } From 7f60adffe531c06bacab79dbf687f0ea85fb99e8 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 19 Aug 2024 16:18:44 +0200 Subject: [PATCH 58/96] crypto: jitter - Use min() to simplify jent_read_entropy() Use the min() macro to simplify the jent_read_entropy() function and improve its readability. 
Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- crypto/jitterentropy.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index d7056de8c0d7..3b390bd6c119 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -146,6 +146,7 @@ struct rand_data { #define JENT_ENTROPY_SAFETY_FACTOR 64 #include +#include #include "jitterentropy.h" /*************************************************************************** @@ -638,10 +639,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, return -2; } - if ((DATA_SIZE_BITS / 8) < len) - tocopy = (DATA_SIZE_BITS / 8); - else - tocopy = len; + tocopy = min(DATA_SIZE_BITS / 8, len); if (jent_read_random_block(ec->hash_state, p, tocopy)) return -1; From 9a7db819a184c21f605a4364131762d5db0c7010 Mon Sep 17 00:00:00 2001 From: Zhu Jun Date: Tue, 20 Aug 2024 00:42:42 -0700 Subject: [PATCH 59/96] crypto: tools/ccp - Remove unused variable the variable is never referenced in the code, just remove them. Signed-off-by: Zhu Jun Reviewed-by: Mario Limonciello Signed-off-by: Herbert Xu --- tools/crypto/ccp/dbc.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/crypto/ccp/dbc.c b/tools/crypto/ccp/dbc.c index a807df0f0597..80248d3d3a5a 100644 --- a/tools/crypto/ccp/dbc.c +++ b/tools/crypto/ccp/dbc.c @@ -57,7 +57,6 @@ int process_param(int fd, int msg_index, __u8 *signature, int *data) .msg_index = msg_index, .param = *data, }; - int ret; assert(signature); assert(data); From f132386dc5e1c2dc5eeaf8742afb663a9666b6f4 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 20 Aug 2024 16:25:20 +0800 Subject: [PATCH 60/96] crypto: safexcel - Remove unused declaration safexcel_ring_first_rptr() Commit 9744fec95f06 ("crypto: inside-secure - remove request list to improve performance") declar this but never implemented. 
Signed-off-by: Yue Haibing Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index d0059ce954dd..0c79ad78d1c0 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -897,7 +897,6 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, int safexcel_select_ring(struct safexcel_crypto_priv *priv); void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv, struct safexcel_desc_ring *ring); -void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring); void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, struct safexcel_desc_ring *ring); struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv, From 106990f3b605c2fabe1ede86371ca7cc9b98ead9 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 20 Aug 2024 16:25:21 +0800 Subject: [PATCH 61/96] crypto: sl3516 - Remove unused declaration sl3516_ce_enqueue() This function is never implemented and used since introduction in commit 46c5338db7bd ("crypto: sl3516 - Add sl3516 crypto engine") Signed-off-by: Yue Haibing Signed-off-by: Herbert Xu --- drivers/crypto/gemini/sl3516-ce.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/gemini/sl3516-ce.h b/drivers/crypto/gemini/sl3516-ce.h index 9e1a7e7f8961..56b844d0cd9c 100644 --- a/drivers/crypto/gemini/sl3516-ce.h +++ b/drivers/crypto/gemini/sl3516-ce.h @@ -326,8 +326,6 @@ struct sl3516_ce_alg_template { unsigned long stat_bytes; }; -int sl3516_ce_enqueue(struct crypto_async_request *areq, u32 type); - int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); int sl3516_ce_cipher_init(struct crypto_tfm *tfm); From 5b6f4cd6fd56e81a37eddf02ef02ec98a4b90632 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 20 Aug 2024 16:25:22 +0800 Subject: [PATCH 62/96] 
crypto: octeontx - Remove unused declaration otx_cpt_callback() This function is never implemented and used since introduction in commit 10b4f09491bf ("crypto: marvell - add the Virtual Function driver for CPT") Signed-off-by: Yue Haibing Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cptvf_algs.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h index 4181b5c5c356..a50b5e2f8d00 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h @@ -185,6 +185,5 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, int num_queues, int num_devices); void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod, enum otx_cptvf_type engine_type); -void otx_cpt_callback(int status, void *arg, void *req); #endif /* __OTX_CPT_ALGS_H */ From 60f911c4ebaf31bdc8de3603541b7b8e2bcdefc7 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 20 Aug 2024 16:25:23 +0800 Subject: [PATCH 63/96] crypto: ccp - Remove unused declaration sp_get_master() This function is never implemented and used since introduction in commit 720419f01832 ("crypto: ccp - Introduce the AMD Secure Processor device"). 
Signed-off-by: Yue Haibing Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sp-dev.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 0895de823674..6f9d7063257d 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -138,7 +138,6 @@ struct sp_device *sp_alloc_struct(struct device *dev); int sp_init(struct sp_device *sp); void sp_destroy(struct sp_device *sp); -struct sp_device *sp_get_master(void); int sp_suspend(struct sp_device *sp); int sp_resume(struct sp_device *sp); From 652e01be364b5bf2e7d4097831d1510c7301bdc2 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 20 Aug 2024 16:25:24 +0800 Subject: [PATCH 64/96] crypto: amlogic - Remove unused declaration meson_enqueue() This function is never implemented and used since introduction in commit 48fe583fe541 ("crypto: amlogic - Add crypto accelerator for amlogic GXL"). Signed-off-by: Yue Haibing Signed-off-by: Herbert Xu --- drivers/crypto/amlogic/amlogic-gxl.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h index 1013a666c932..d68094ffb70a 100644 --- a/drivers/crypto/amlogic/amlogic-gxl.h +++ b/drivers/crypto/amlogic/amlogic-gxl.h @@ -150,8 +150,6 @@ struct meson_alg_template { #endif }; -int meson_enqueue(struct crypto_async_request *areq, u32 type); - int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); int meson_cipher_init(struct crypto_tfm *tfm); From f716045f24c2569448e1491857f638b8ffb6cafb Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 20 Aug 2024 16:25:25 +0800 Subject: [PATCH 65/96] crypto: crypto4xx - Remove unused declaration crypto4xx_free_ctx() This function is never implemented and used since introduction in commit 049359d65527 ("crypto: amcc - Add crypt4xx driver"). 
Signed-off-by: Yue Haibing Signed-off-by: Herbert Xu --- drivers/crypto/amcc/crypto4xx_core.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index 96355d463b04..3adcc5e65694 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -149,7 +149,6 @@ struct crypto4xx_alg { int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); -void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx); int crypto4xx_build_pd(struct crypto_async_request *req, struct crypto4xx_ctx *ctx, struct scatterlist *src, From 065c547d951893201de1368863280bc943a35413 Mon Sep 17 00:00:00 2001 From: Huan Yang Date: Tue, 20 Aug 2024 17:47:11 +0800 Subject: [PATCH 66/96] hwrng: mxc-rnga - Use devm_clk_get_enabled() helpers The devm_clk_get_enabled() helpers: - call devm_clk_get() - call clk_prepare_enable() and register what is needed in order to call clk_disable_unprepare() when needed, as a managed resource. This simplifies the code and avoids the calls to clk_disable_unprepare(). 
Signed-off-by: Huan Yang Reviewed-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/mxc-rnga.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index 94ee18a1120a..f01eb95bee31 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c @@ -147,33 +147,25 @@ static int mxc_rnga_probe(struct platform_device *pdev) mxc_rng->rng.data_present = mxc_rnga_data_present; mxc_rng->rng.data_read = mxc_rnga_data_read; - mxc_rng->clk = devm_clk_get(&pdev->dev, NULL); + mxc_rng->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(mxc_rng->clk)) { dev_err(&pdev->dev, "Could not get rng_clk!\n"); return PTR_ERR(mxc_rng->clk); } - err = clk_prepare_enable(mxc_rng->clk); - if (err) - return err; - mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mxc_rng->mem)) { err = PTR_ERR(mxc_rng->mem); - goto err_ioremap; + return err; } err = hwrng_register(&mxc_rng->rng); if (err) { dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err); - goto err_ioremap; + return err; } return 0; - -err_ioremap: - clk_disable_unprepare(mxc_rng->clk); - return err; } static void mxc_rnga_remove(struct platform_device *pdev) @@ -181,8 +173,6 @@ static void mxc_rnga_remove(struct platform_device *pdev) struct mxc_rng *mxc_rng = platform_get_drvdata(pdev); hwrng_unregister(&mxc_rng->rng); - - clk_disable_unprepare(mxc_rng->clk); } static const struct of_device_id mxc_rnga_of_match[] = { From 9c2797093a4095a1d686b6c51fbd321a627855ee Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Wed, 21 Aug 2024 20:12:34 +0200 Subject: [PATCH 67/96] hwrng: rockchip - rst is used only during probe The driver uses the rst variable only for an initial reset when the chip is probed. There's no need to store rst in the driver's private data, we can make it a local variable in the probe function. 
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/rockchip-rng.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c index 548e2f4d1490..0dff9de64bc5 100644 --- a/drivers/char/hw_random/rockchip-rng.c +++ b/drivers/char/hw_random/rockchip-rng.c @@ -52,7 +52,6 @@ struct rk_rng { struct hwrng rng; void __iomem *base; - struct reset_control *rst; int clk_num; struct clk_bulk_data *clk_bulks; }; @@ -132,6 +131,7 @@ out: static int rk_rng_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct reset_control *rst; struct rk_rng *rk_rng; int ret; @@ -148,14 +148,13 @@ static int rk_rng_probe(struct platform_device *pdev) return dev_err_probe(dev, rk_rng->clk_num, "Failed to get clks property\n"); - rk_rng->rst = devm_reset_control_array_get_exclusive(&pdev->dev); - if (IS_ERR(rk_rng->rst)) - return dev_err_probe(dev, PTR_ERR(rk_rng->rst), - "Failed to get reset property\n"); + rst = devm_reset_control_array_get_exclusive(&pdev->dev); + if (IS_ERR(rst)) + return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n"); - reset_control_assert(rk_rng->rst); + reset_control_assert(rst); udelay(2); - reset_control_deassert(rk_rng->rst); + reset_control_deassert(rst); platform_set_drvdata(pdev, rk_rng); From 866ff78da10178cf98600f59ea353fb1b2b7976e Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Wed, 21 Aug 2024 20:12:35 +0200 Subject: [PATCH 68/96] hwrng: rockchip - handle devm_pm_runtime_enable errors It's unlikely that devm_pm_runtime_enable ever fails. Still, it makes sense to read the return value and handle errors. 
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/rockchip-rng.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c index 0dff9de64bc5..289b385bbf05 100644 --- a/drivers/char/hw_random/rockchip-rng.c +++ b/drivers/char/hw_random/rockchip-rng.c @@ -169,7 +169,9 @@ static int rk_rng_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); - devm_pm_runtime_enable(dev); + ret = devm_pm_runtime_enable(dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Runtime pm activation failed.\n"); ret = devm_hwrng_register(dev, &rk_rng->rng); if (ret) From c7de6ee3d312ae88f6f9a04afa211b52da613852 Mon Sep 17 00:00:00 2001 From: Frank Li Date: Wed, 21 Aug 2024 15:20:48 -0400 Subject: [PATCH 69/96] dt-bindings: crypto: fsl,sec-v4.0: add second register space for rtic Add two description for register space of rtic. There are two register space, one is for control and status, the other optional space is recoverable error indication register space. 
Fix below CHECK_DTBS error: arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dtb: crypto@1700000: rtic@60000:reg: [[393216, 256], [396800, 24]] is too long from schema $id: http://devicetree.org/schemas/crypto/fsl,sec-v4.0.yaml# Signed-off-by: Frank Li Acked-by: Conor Dooley Signed-off-by: Herbert Xu --- Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml index 0a9ed2848b7c..9c8c9991f29a 100644 --- a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml +++ b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml @@ -137,7 +137,10 @@ patternProperties: - const: fsl,sec-v4.0-rtic reg: - maxItems: 1 + items: + - description: RTIC control and status register space. + - description: RTIC recoverable error indication register space. + minItems: 1 ranges: maxItems: 1 From 24cc57d8faaa4060fd58adf810b858fcfb71a02f Mon Sep 17 00:00:00 2001 From: Kamlesh Gurudasani Date: Thu, 22 Aug 2024 02:32:52 +0530 Subject: [PATCH 70/96] padata: Honor the caller's alignment in case of chunk_size 0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the case where we are forcing the ps.chunk_size to be at least 1, we are ignoring the caller's alignment. Move the forcing of ps.chunk_size to be at least 1 before rounding it up to caller's alignment, so that caller's alignment is honored. While at it, use max() to force the ps.chunk_size to be at least 1 to improve readability. 
Fixes: 6d45e1c948a8 ("padata: Fix possible divide-by-0 panic in padata_mt_helper()") Signed-off-by: Kamlesh Gurudasani Acked-by:  Waiman Long Acked-by: Daniel Jordan Signed-off-by: Herbert Xu --- kernel/padata.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/padata.c b/kernel/padata.c index 222bccd0c96b..d51bbc76b227 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -513,9 +513,12 @@ void __init padata_do_multithreaded(struct padata_mt_job *job) * thread function. Load balance large jobs between threads by * increasing the number of chunks, guarantee at least the minimum * chunk size from the caller, and honor the caller's alignment. + * Ensure chunk_size is at least 1 to prevent divide-by-0 + * panic in padata_mt_helper(). */ ps.chunk_size = job->size / (ps.nworks * load_balance_factor); ps.chunk_size = max(ps.chunk_size, job->min_chunk); + ps.chunk_size = max(ps.chunk_size, 1ul); ps.chunk_size = roundup(ps.chunk_size, job->align); list_for_each_entry(pw, &works, pw_list) From eb7bb0b56b41e9dd73d340159b8a0ce743352014 Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Fri, 23 Aug 2024 03:42:49 -0600 Subject: [PATCH 71/96] crypto: atmel - use devm_clk_get_prepared() helpers Simplify the code by replacing devm_clk_get() and clk_prepare() with devm_clk_get_prepared(), which also avoids the call to clk_unprepare(). 
Signed-off-by: Chunhai Guo Signed-off-by: Herbert Xu --- drivers/crypto/atmel-aes.c | 16 ++++------------ drivers/crypto/atmel-sha.c | 14 +++----------- 2 files changed, 7 insertions(+), 23 deletions(-) diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 8bd64fc37e75..0dd90785db9a 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2376,33 +2376,29 @@ static int atmel_aes_probe(struct platform_device *pdev) } /* Initializing the clock */ - aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk"); + aes_dd->iclk = devm_clk_get_prepared(&pdev->dev, "aes_clk"); if (IS_ERR(aes_dd->iclk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(aes_dd->iclk); goto err_tasklet_kill; } - err = clk_prepare(aes_dd->iclk); - if (err) - goto err_tasklet_kill; - err = atmel_aes_hw_version_init(aes_dd); if (err) - goto err_iclk_unprepare; + goto err_tasklet_kill; atmel_aes_get_cap(aes_dd); #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) { err = -EPROBE_DEFER; - goto err_iclk_unprepare; + goto err_tasklet_kill; } #endif err = atmel_aes_buff_init(aes_dd); if (err) - goto err_iclk_unprepare; + goto err_tasklet_kill; err = atmel_aes_dma_init(aes_dd); if (err) @@ -2429,8 +2425,6 @@ err_algs: atmel_aes_dma_cleanup(aes_dd); err_buff_cleanup: atmel_aes_buff_cleanup(aes_dd); -err_iclk_unprepare: - clk_unprepare(aes_dd->iclk); err_tasklet_kill: tasklet_kill(&aes_dd->done_task); tasklet_kill(&aes_dd->queue_task); @@ -2455,8 +2449,6 @@ static void atmel_aes_remove(struct platform_device *pdev) atmel_aes_dma_cleanup(aes_dd); atmel_aes_buff_cleanup(aes_dd); - - clk_unprepare(aes_dd->iclk); } static struct platform_driver atmel_aes_driver = { diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index f4cd6158a4f7..8cc57df25778 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2623,27 +2623,23 @@ static int atmel_sha_probe(struct 
platform_device *pdev) } /* Initializing the clock */ - sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk"); + sha_dd->iclk = devm_clk_get_prepared(&pdev->dev, "sha_clk"); if (IS_ERR(sha_dd->iclk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(sha_dd->iclk); goto err_tasklet_kill; } - err = clk_prepare(sha_dd->iclk); - if (err) - goto err_tasklet_kill; - err = atmel_sha_hw_version_init(sha_dd); if (err) - goto err_iclk_unprepare; + goto err_tasklet_kill; atmel_sha_get_cap(sha_dd); if (sha_dd->caps.has_dma) { err = atmel_sha_dma_init(sha_dd); if (err) - goto err_iclk_unprepare; + goto err_tasklet_kill; dev_info(dev, "using %s for DMA transfers\n", dma_chan_name(sha_dd->dma_lch_in.chan)); @@ -2669,8 +2665,6 @@ err_algs: spin_unlock(&atmel_sha.lock); if (sha_dd->caps.has_dma) atmel_sha_dma_cleanup(sha_dd); -err_iclk_unprepare: - clk_unprepare(sha_dd->iclk); err_tasklet_kill: tasklet_kill(&sha_dd->queue_task); tasklet_kill(&sha_dd->done_task); @@ -2693,8 +2687,6 @@ static void atmel_sha_remove(struct platform_device *pdev) if (sha_dd->caps.has_dma) atmel_sha_dma_cleanup(sha_dd); - - clk_unprepare(sha_dd->iclk); } static struct platform_driver atmel_sha_driver = { From 407f8cf8e6875fc8fb3c0cda193f310340122060 Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Fri, 23 Aug 2024 03:52:12 -0600 Subject: [PATCH 72/96] crypto: img-hash - use devm_clk_get_enabled() helpers Simplify the code by replacing devm_clk_get() and clk_prepare_enable() with devm_clk_get_enabled(), which also avoids the call to clk_disable_unprepare(). 
Signed-off-by: Chunhai Guo Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index d269036bdaa3..7e93159c3b6b 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -987,31 +987,23 @@ static int img_hash_probe(struct platform_device *pdev) } dev_dbg(dev, "using IRQ channel %d\n", irq); - hdev->hash_clk = devm_clk_get(&pdev->dev, "hash"); + hdev->hash_clk = devm_clk_get_enabled(&pdev->dev, "hash"); if (IS_ERR(hdev->hash_clk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(hdev->hash_clk); goto res_err; } - hdev->sys_clk = devm_clk_get(&pdev->dev, "sys"); + hdev->sys_clk = devm_clk_get_enabled(&pdev->dev, "sys"); if (IS_ERR(hdev->sys_clk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(hdev->sys_clk); goto res_err; } - err = clk_prepare_enable(hdev->hash_clk); - if (err) - goto res_err; - - err = clk_prepare_enable(hdev->sys_clk); - if (err) - goto clk_err; - err = img_hash_dma_init(hdev); if (err) - goto dma_err; + goto res_err; dev_dbg(dev, "using %s for DMA transfers\n", dma_chan_name(hdev->dma_lch)); @@ -1032,10 +1024,6 @@ err_algs: list_del(&hdev->list); spin_unlock(&img_hash.lock); dma_release_channel(hdev->dma_lch); -dma_err: - clk_disable_unprepare(hdev->sys_clk); -clk_err: - clk_disable_unprepare(hdev->hash_clk); res_err: tasklet_kill(&hdev->done_task); tasklet_kill(&hdev->dma_task); @@ -1058,9 +1046,6 @@ static void img_hash_remove(struct platform_device *pdev) tasklet_kill(&hdev->dma_task); dma_release_channel(hdev->dma_lch); - - clk_disable_unprepare(hdev->hash_clk); - clk_disable_unprepare(hdev->sys_clk); } #ifdef CONFIG_PM_SLEEP From be9c336852056e2c34369de79fd938dc21a2d5cf Mon Sep 17 00:00:00 2001 From: Kuan-Wei Chiu Date: Sat, 24 Aug 2024 02:38:56 +0800 Subject: [PATCH 73/96] crypto: hisilicon/zip - Optimize performance by replacing rw_lock with 
spinlock The req_lock is currently implemented as a rw_lock, but there are no instances where read_lock() is called. This means that the lock is effectively only used by writers, making it functionally equivalent to a simple spinlock. As stated in Documentation/locking/spinlocks.rst: "Reader-writer locks require more atomic memory operations than simple spinlocks. Unless the reader critical section is long, you are better off just using spinlocks." Since the rw_lock in this case incurs additional atomic memory operations without any benefit from reader-writer locking, it is more efficient to replace it with a spinlock. This patch implements that replacement to optimize the driver's performance. Signed-off-by: Kuan-Wei Chiu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_crypto.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 94e2d66b04b6..92d3bd0dfe1b 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -54,7 +54,7 @@ struct hisi_zip_req { struct hisi_zip_req_q { struct hisi_zip_req *q; unsigned long *req_bitmap; - rwlock_t req_lock; + spinlock_t req_lock; u16 size; }; @@ -116,17 +116,17 @@ static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx, struct hisi_zip_req *req_cache; int req_id; - write_lock(&req_q->req_lock); + spin_lock(&req_q->req_lock); req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size); if (req_id >= req_q->size) { - write_unlock(&req_q->req_lock); + spin_unlock(&req_q->req_lock); dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); return ERR_PTR(-EAGAIN); } set_bit(req_id, req_q->req_bitmap); - write_unlock(&req_q->req_lock); + spin_unlock(&req_q->req_lock); req_cache = q + req_id; req_cache->req_id = req_id; @@ -140,9 +140,9 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, { struct hisi_zip_req_q 
*req_q = &qp_ctx->req_q; - write_lock(&req_q->req_lock); + spin_lock(&req_q->req_lock); clear_bit(req->req_id, req_q->req_bitmap); - write_unlock(&req_q->req_lock); + spin_unlock(&req_q->req_lock); } static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) @@ -456,7 +456,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) goto err_free_comp_q; } - rwlock_init(&req_q->req_lock); + spin_lock_init(&req_q->req_lock); req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req), GFP_KERNEL); From b8fc70ab7b5f3afbc4fb0587782633d7fcf1e069 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 4 Sep 2024 07:09:51 +0800 Subject: [PATCH 74/96] Revert "crypto: spacc - Add SPAcc Skcipher support" This reverts the following commits: 87a3fcf5fec5fb59ec8f23d12a56bcf2b2ee6db7 58bf99100a6dfcc53ba4ab547f1394bb6873b2ac 3b1c9df662915a18a86f1a88364ee70875ed3b44 8bc1bfa02e37d63632f0cb65543e3e71acdccafb c32f08d024e275059474b3c11c1fc2bc7f2de990 f036dd566453176d4eafb9701ebd69e7e59d6707 c76c9ec333432088a1c6f52650c149530fc5df5d 5d22d37aa8b93efaad797faf80db40ea59453481 b63483b37e813299445d2719488acab2b3f20544 2d6213bd592b4731b53ece3492f9d1d18e97eb5e fc61c658c94cb7405ca6946d8f2a2b71cef49845 cb67c924b2a7b561bd7f4f2bd66766337c1007b7 06af76b46c78f4729fe2f9712a74502c90d87554 9f1a7ab4d31ef30fbf8adb0985300049469f2270 8ebb14deef0f374f7ca0d34a1ad720ba0a7b79f3 c8981d9230d808e62c65349d0b255c7f4b9087d6 They were submitted with no device tree bindings. 
Reported-by: Rob Herring Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 - drivers/crypto/Makefile | 1 - drivers/crypto/dwc-spacc/Kconfig | 95 - drivers/crypto/dwc-spacc/Makefile | 16 - drivers/crypto/dwc-spacc/spacc_aead.c | 1243 ---------- drivers/crypto/dwc-spacc/spacc_ahash.c | 914 ------- drivers/crypto/dwc-spacc/spacc_core.c | 2513 -------------------- drivers/crypto/dwc-spacc/spacc_core.h | 824 ------- drivers/crypto/dwc-spacc/spacc_device.c | 338 --- drivers/crypto/dwc-spacc/spacc_device.h | 231 -- drivers/crypto/dwc-spacc/spacc_hal.c | 367 --- drivers/crypto/dwc-spacc/spacc_hal.h | 114 - drivers/crypto/dwc-spacc/spacc_interrupt.c | 316 --- drivers/crypto/dwc-spacc/spacc_manager.c | 653 ----- drivers/crypto/dwc-spacc/spacc_skcipher.c | 717 ------ 15 files changed, 8343 deletions(-) delete mode 100644 drivers/crypto/dwc-spacc/Kconfig delete mode 100644 drivers/crypto/dwc-spacc/Makefile delete mode 100755 drivers/crypto/dwc-spacc/spacc_aead.c delete mode 100644 drivers/crypto/dwc-spacc/spacc_ahash.c delete mode 100644 drivers/crypto/dwc-spacc/spacc_core.c delete mode 100644 drivers/crypto/dwc-spacc/spacc_core.h delete mode 100644 drivers/crypto/dwc-spacc/spacc_device.c delete mode 100644 drivers/crypto/dwc-spacc/spacc_device.h delete mode 100644 drivers/crypto/dwc-spacc/spacc_hal.c delete mode 100644 drivers/crypto/dwc-spacc/spacc_hal.h delete mode 100644 drivers/crypto/dwc-spacc/spacc_interrupt.c delete mode 100644 drivers/crypto/dwc-spacc/spacc_manager.c delete mode 100644 drivers/crypto/dwc-spacc/spacc_skcipher.c diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 009cbd0e1993..94f23c6fc93b 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -696,7 +696,6 @@ config CRYPTO_DEV_BCM_SPU ahash, and aead algorithms with the kernel cryptographic API. 
source "drivers/crypto/stm32/Kconfig" -source "drivers/crypto/dwc-spacc/Kconfig" config CRYPTO_DEV_SAFEXCEL tristate "Inside Secure's SafeXcel cryptographic engine driver" diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index a937e8f5849b..ad4ccef67d12 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -48,7 +48,6 @@ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ obj-y += xilinx/ -obj-y += dwc-spacc/ obj-y += hisilicon/ obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ obj-y += intel/ diff --git a/drivers/crypto/dwc-spacc/Kconfig b/drivers/crypto/dwc-spacc/Kconfig deleted file mode 100644 index 9eb41a295f9d..000000000000 --- a/drivers/crypto/dwc-spacc/Kconfig +++ /dev/null @@ -1,95 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -config CRYPTO_DEV_SPACC - tristate "Support for dw_spacc Security protocol accelerators" - depends on HAS_DMA - default m - - help - This enables support for the HASH/CRYP/AEAD hw accelerator which can be found - on dw_spacc IP. - -config CRYPTO_DEV_SPACC_CIPHER - bool "Enable CIPHER functionality" - depends on CRYPTO_DEV_SPACC - default y - select CRYPTO_SKCIPHER - select CRYPTO_LIB_DES - select CRYPTO_AES - select CRYPTO_CBC - select CRYPTO_ECB - select CRYPTO_CTR - select CRYPTO_XTS - select CRYPTO_CTS - select CRYPTO_OFB - select CRYPTO_CFB - select CRYPTO_SM4_GENERIC - select CRYPTO_CHACHA20 - - help - Say y to enable Cipher functionality of SPACC. - -config CRYPTO_DEV_SPACC_HASH - bool "Enable HASH functionality" - depends on CRYPTO_DEV_SPACC - default y - select CRYPTO_HASH - select CRYPTO_SHA1 - select CRYPTO_MD5 - select CRYPTO_SHA256 - select CRYPTO_SHA512 - select CRYPTO_HMAC - select CRYPTO_SM3 - select CRYPTO_CMAC - select CRYPTO_MICHAEL_MIC - select CRYPTO_XCBC - select CRYPTO_AES - select CRYPTO_SM4_GENERIC - - help - Say y to enable Hash functionality of SPACC. 
- -config CRYPTO_DEV_SPACC_AEAD - bool "Enable AEAD functionality" - depends on CRYPTO_DEV_SPACC - default y - select CRYPTO_AEAD - select CRYPTO_AUTHENC - select CRYPTO_AES - select CRYPTO_SM4_GENERIC - select CRYPTO_CHACHAPOLY1305 - select CRYPTO_GCM - select CRYPTO_CCM - - help - Say y to enable AEAD functionality of SPACC. - -config CRYPTO_DEV_SPACC_AUTODETECT - bool "Enable Autodetect functionality" - depends on CRYPTO_DEV_SPACC - default y - help - Say y to enable Autodetect functionality - -config CRYPTO_DEV_SPACC_DEBUG_TRACE_IO - bool "Enable Trace MMIO reads/writes stats" - depends on CRYPTO_DEV_SPACC - default n - help - Say y to enable Trace MMIO reads/writes stats. - To Debug and trace IO register read/write opration - -config CRYPTO_DEV_SPACC_DEBUG_TRACE_DDT - bool "Enable Trace DDT entries stats" - default n - depends on CRYPTO_DEV_SPACC - help - Say y to enable Enable Trace DDT entries stats. - To Debug and trace DDT opration - -config CRYPTO_DEV_SPACC_SECURE_MODE - bool "Enable Spacc secure mode stats" - default n - depends on CRYPTO_DEV_SPACC - help - Say y to enable Spacc secure modes stats. 
diff --git a/drivers/crypto/dwc-spacc/Makefile b/drivers/crypto/dwc-spacc/Makefile deleted file mode 100644 index bf46c8e13a31..000000000000 --- a/drivers/crypto/dwc-spacc/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_CRYPTO_DEV_SPACC) += snps-spacc.o -snps-spacc-objs = spacc_hal.o spacc_core.o \ -spacc_manager.o spacc_interrupt.o spacc_device.o - -ifeq ($(CONFIG_CRYPTO_DEV_SPACC_HASH),y) -snps-spacc-objs += spacc_ahash.o -endif - -ifeq ($(CONFIG_CRYPTO_DEV_SPACC_CIPHER),y) -snps-spacc-objs += spacc_skcipher.o -endif - -ifeq ($(CONFIG_CRYPTO_DEV_SPACC_AEAD),y) -snps-spacc-objs += spacc_aead.o -endif diff --git a/drivers/crypto/dwc-spacc/spacc_aead.c b/drivers/crypto/dwc-spacc/spacc_aead.c deleted file mode 100755 index 7f6c48881eab..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_aead.c +++ /dev/null @@ -1,1243 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "spacc_device.h" -#include "spacc_core.h" - -static LIST_HEAD(spacc_aead_alg_list); -static DEFINE_MUTEX(spacc_aead_alg_mutex); - -#define SPACC_B0_SIZE 16 -#define SET_IV_IN_SRCBUF 0x80000000 -#define SET_IV_IN_CONTEXT 0x0 -#define AAD_BUF_SIZE 4096 -#define ADATA_BUF_SIZE (AAD_BUF_SIZE + SPACC_B0_SIZE +\ - SPACC_MAX_IV_SIZE) - -struct spacc_iv_buf { - unsigned char iv[SPACC_MAX_IV_SIZE]; - unsigned char spacc_adata[ADATA_BUF_SIZE]; - struct scatterlist sg[2], spacc_adata_sg[2]; - struct scatterlist *spacc_ptextsg, temp_aad[2]; -}; - -static struct kmem_cache *spacc_iv_pool; - -static struct mode_tab possible_aeads[] = { - { MODE_TAB_AEAD("rfc7539(chacha20,poly1305)", - CRYPTO_MODE_CHACHA20_POLY1305, CRYPTO_MODE_NULL, - 16, 12, 1), .keylen = { 16, 24, 32 } - }, - { MODE_TAB_AEAD("gcm(aes)", - CRYPTO_MODE_AES_GCM, CRYPTO_MODE_NULL, - 16, 12, 1), .keylen = { 16, 24, 32 } - }, - { MODE_TAB_AEAD("gcm(sm4)", - CRYPTO_MODE_SM4_GCM, CRYPTO_MODE_NULL, - 16, 12, 1), .keylen = { 
16 } - }, - { MODE_TAB_AEAD("ccm(aes)", - CRYPTO_MODE_AES_CCM, CRYPTO_MODE_NULL, - 16, 16, 1), .keylen = { 16, 24, 32 } - }, - { MODE_TAB_AEAD("ccm(sm4)", - CRYPTO_MODE_SM4_CCM, CRYPTO_MODE_NULL, - 16, 16, 1), .keylen = { 16, 24, 32 } - }, -}; - -static void spacc_init_aead_alg(struct crypto_alg *calg, - const struct mode_tab *mode) -{ - strscpy(calg->cra_name, mode->name, sizeof(mode->name) - 1); - calg->cra_name[sizeof(mode->name) - 1] = '\0'; - - strscpy(calg->cra_driver_name, "spacc-"); - strcat(calg->cra_driver_name, mode->name); - calg->cra_driver_name[sizeof(calg->cra_driver_name) - 1] = '\0'; - - calg->cra_blocksize = mode->blocklen; -} - -static int ccm_16byte_aligned_len(int in_len) -{ - int len; - int computed_mod; - - if (in_len > 0) { - computed_mod = in_len % 16; - if (computed_mod) - len = in_len - computed_mod + 16; - else - len = in_len; - } else { - len = in_len; - } - - return len; -} - -/* taken from crypto/ccm.c */ -static int spacc_aead_format_adata(u8 *adata, unsigned int a) -{ - int len = 0; - - /* add control info for associated data - * RFC 3610 and NIST Special Publication 800-38C - */ - if (a < 65280) { - *(__be16 *)adata = cpu_to_be16(a); - len = 2; - } else { - *(__be16 *)adata = cpu_to_be16(0xfffe); - *(__be32 *)&adata[2] = cpu_to_be32(a); - len = 6; - } - - return len; -} - - -/* taken from crypto/ccm.c */ -static int spacc_aead_set_msg_len(u8 *block, unsigned int msglen, int csize) -{ - __be32 data; - - memset(block, 0, csize); - block += csize; - - if (csize >= 4) - csize = 4; - else if (msglen > (unsigned int)(1 << (8 * csize))) - return -EOVERFLOW; - - data = cpu_to_be32(msglen); - memcpy(block - csize, (u8 *)&data + 4 - csize, csize); - - return 0; -} - -static int spacc_aead_init_dma(struct device *dev, struct aead_request *req, - u64 seq, uint32_t icvlen, int encrypt, int *alen) -{ - struct crypto_aead *reqtfm = crypto_aead_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_aead_ctx(reqtfm); - struct spacc_crypto_reqctx 
*ctx = aead_request_ctx(req); - - gfp_t mflags = GFP_ATOMIC; - struct spacc_iv_buf *iv; - int ccm_aad_16b_len = 0; - int rc, B0len; - int payload_len, spacc_adata_sg_buf_len; - unsigned int ivsize = crypto_aead_ivsize(reqtfm); - - /* always have 1 byte of IV */ - if (!ivsize) - ivsize = 1; - - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) - mflags = GFP_KERNEL; - - ctx->iv_buf = kmem_cache_alloc(spacc_iv_pool, mflags); - if (!ctx->iv_buf) - return -ENOMEM; - iv = ctx->iv_buf; - - sg_init_table(iv->sg, ARRAY_SIZE(iv->sg)); - sg_init_table(iv->spacc_adata_sg, ARRAY_SIZE(iv->spacc_adata_sg)); - - B0len = 0; - ctx->aead_nents = 0; - - memset(iv->iv, 0, SPACC_MAX_IV_SIZE); - memset(iv->spacc_adata, 0, ADATA_BUF_SIZE); - - /* copy the IV out for AAD */ - memcpy(iv->iv, req->iv, ivsize); - memset(iv->spacc_adata, 0, 144); - - /* now we need to figure out the cipher IV which may or - * may not be "req->iv" depending on the mode we are in - */ - if (tctx->mode & SPACC_MANGLE_IV_FLAG) { - switch (tctx->mode & 0x7F00) { - case SPACC_MANGLE_IV_RFC3686: - case SPACC_MANGLE_IV_RFC4106: - case SPACC_MANGLE_IV_RFC4543: - { - unsigned char *p = iv->spacc_adata; - /* we're in RFC3686 mode so the last - * 4 bytes of the key are the SALT - */ - memcpy(p, tctx->csalt, 4); - memcpy(p + 4, req->iv, ivsize); - - p[12] = 0; - p[13] = 0; - p[14] = 0; - p[15] = 1; - } - break; - case SPACC_MANGLE_IV_RFC4309: - { - unsigned char *p = iv->spacc_adata; - int L, M; - u32 lm = req->cryptlen; - - /* CCM mode */ - /* p[0..15] is the CTR IV */ - /* p[16..31] is the CBC-MAC B0 block*/ - B0len = SPACC_B0_SIZE; - /* IPsec requires L=4*/ - L = 4; - M = tctx->auth_size; - - /* CTR block */ - p[0] = L - 1; - memcpy(p + 1, tctx->csalt, 3); - memcpy(p + 4, req->iv, ivsize); - p[12] = 0; - p[13] = 0; - p[14] = 0; - p[15] = 1; - - /* store B0 block at p[16..31] */ - p[16] = (1 << 6) | (((M - 2) >> 1) << 3) - | (L - 1); - memcpy(p + 1 + 16, tctx->csalt, 3); - memcpy(p + 4 + 16, req->iv, ivsize); - - /* now 
store length */ - p[16 + 12 + 0] = (lm >> 24) & 0xFF; - p[16 + 12 + 1] = (lm >> 16) & 0xFF; - p[16 + 12 + 2] = (lm >> 8) & 0xFF; - p[16 + 12 + 3] = (lm) & 0xFF; - - /*now store the pre-formatted AAD */ - p[32] = (req->assoclen >> 8) & 0xFF; - p[33] = (req->assoclen) & 0xFF; - /* we added 2 byte header to the AAD */ - B0len += 2; - } - break; - } - } else if (tctx->mode == CRYPTO_MODE_AES_CCM || - tctx->mode == CRYPTO_MODE_SM4_CCM) { - unsigned char *p = iv->spacc_adata; - u8 *orig_iv = req->iv; - int L, M; - - u32 lm = (encrypt) ? - req->cryptlen : - req->cryptlen - tctx->auth_size; - - memset(iv->spacc_adata, 0, 144); - iv->spacc_ptextsg = req->src; - /* CCM mode */ - /* p[0..15] is the CTR IV */ - /* p[16..31] is the CBC-MAC B0 block*/ - B0len = SPACC_B0_SIZE; - - /* IPsec requires L=4 */ - L = req->iv[0] + 1; - M = tctx->auth_size; - - /* Note: rfc 3610 and NIST 800-38C require counter of - * zero to encrypt auth tag. - */ - memset(orig_iv + 15 - orig_iv[0], 0, orig_iv[0] + 1); - - /* CTR block */ - memcpy(p, req->iv, ivsize); - memcpy(p + 16, req->iv, ivsize); - - /* Taken from ccm.c - * Note: rfc 3610 and NIST 800-38C require counter of - * zero to encrypt auth tag. 
- */ - - /* Store B0 block at p[16..31] */ - p[16] |= (8 * ((M - 2) / 2)); - - /* set adata if assoclen > 0 */ - if (req->assoclen) - p[16] |= 64; - - /* now store length, this is L size starts from 16-L - * to 16 of B0 - */ - spacc_aead_set_msg_len(p + 16 + 16 - L, lm, L); - - if (req->assoclen) { - - /* store pre-formatted AAD: - * AAD_LEN + AAD + PAD - */ - *alen = spacc_aead_format_adata(&p[32], req->assoclen); - - ccm_aad_16b_len = - ccm_16byte_aligned_len(req->assoclen + *alen); - - /* Adding the rest of AAD from req->src */ - scatterwalk_map_and_copy(p + 32 + *alen, - req->src, 0, - req->assoclen, 0); - - /* Copy AAD to req->dst */ - scatterwalk_map_and_copy(p + 32 + *alen, req->dst, - 0, req->assoclen, 1); - - iv->spacc_ptextsg = scatterwalk_ffwd(iv->temp_aad, - req->src, req->assoclen); - } - /* default is to copy the iv over since the - * cipher and protocol IV are the same - */ - memcpy(iv->spacc_adata, req->iv, ivsize); - } - - /* this is part of the AAD */ - sg_set_buf(iv->sg, iv->iv, ivsize); - - /* GCM and CCM don't include the IV in the AAD */ - switch (tctx->mode) { - case CRYPTO_MODE_AES_GCM_RFC4106: - case CRYPTO_MODE_AES_GCM: - case CRYPTO_MODE_SM4_GCM_RFC8998: - case CRYPTO_MODE_CHACHA20_POLY1305: - case CRYPTO_MODE_NULL: - - payload_len = req->cryptlen + icvlen + req->assoclen; - spacc_adata_sg_buf_len = SPACC_MAX_IV_SIZE + B0len; - - /* this is the actual IV getting fed to the core - * (via IV IMPORT) - */ - - sg_set_buf(iv->spacc_adata_sg, iv->spacc_adata, - spacc_adata_sg_buf_len); - - sg_chain(iv->spacc_adata_sg, - sg_nents_for_len(iv->spacc_adata_sg, - spacc_adata_sg_buf_len) + 1, req->src); - - rc = spacc_sg_to_ddt(dev, iv->spacc_adata_sg, - spacc_adata_sg_buf_len + payload_len, - &ctx->src, DMA_TO_DEVICE); - - if (rc < 0) - goto err_free_iv; - ctx->aead_nents = rc; - break; - case CRYPTO_MODE_AES_CCM: - case CRYPTO_MODE_AES_CCM_RFC4309: - case CRYPTO_MODE_SM4_CCM: - - - if (encrypt) - payload_len = - ccm_16byte_aligned_len(req->cryptlen 
+ icvlen); - else - payload_len = - ccm_16byte_aligned_len(req->cryptlen); - - spacc_adata_sg_buf_len = SPACC_MAX_IV_SIZE + B0len + - ccm_aad_16b_len; - - /* this is the actual IV getting fed to the core (via IV IMPORT) - * This has CTR IV + B0 + AAD(B1, B2, ...) - */ - sg_set_buf(iv->spacc_adata_sg, iv->spacc_adata, - spacc_adata_sg_buf_len); - sg_chain(iv->spacc_adata_sg, - sg_nents_for_len(iv->spacc_adata_sg, - spacc_adata_sg_buf_len) + 1, - iv->spacc_ptextsg); - - rc = spacc_sg_to_ddt(dev, iv->spacc_adata_sg, - spacc_adata_sg_buf_len + payload_len, - &ctx->src, DMA_TO_DEVICE); - if (rc < 0) - goto err_free_iv; - ctx->aead_nents = rc; - break; - default: - - /* this is the actual IV getting fed to the core (via IV IMPORT) - * This has CTR IV + B0 + AAD(B1, B2, ...) - */ - payload_len = req->cryptlen + icvlen + req->assoclen; - spacc_adata_sg_buf_len = SPACC_MAX_IV_SIZE + B0len; - sg_set_buf(iv->spacc_adata_sg, iv->spacc_adata, - spacc_adata_sg_buf_len); - - sg_chain(iv->spacc_adata_sg, - sg_nents_for_len(iv->spacc_adata_sg, - spacc_adata_sg_buf_len) + 1, - req->src); - - rc = spacc_sg_to_ddt(dev, iv->spacc_adata_sg, - spacc_adata_sg_buf_len + payload_len, - &ctx->src, DMA_TO_DEVICE); - - if (rc < 0) - goto err_free_iv; - ctx->aead_nents = rc; - } - - /* Putting in req->dst is good since it won't overwrite anything - * even in case of CCM this is fine condition - */ - if (req->dst != req->src) { - switch (tctx->mode) { - case CRYPTO_MODE_AES_CCM: - case CRYPTO_MODE_AES_CCM_RFC4309: - case CRYPTO_MODE_SM4_CCM: - /* If req->dst buffer len is not-positive, - * then skip setting up of DMA - */ - if (req->dst->length <= 0) { - ctx->dst_nents = 0; - return 0; - } - - if (encrypt) - payload_len = req->cryptlen + icvlen + - req->assoclen; - else - payload_len = req->cryptlen - tctx->auth_size + - req->assoclen; - - /* For corner cases where PTlen=AADlen=0, we set default - * to 16 - */ - rc = spacc_sg_to_ddt(dev, req->dst, - payload_len > 0 ? 
payload_len : 16, - &ctx->dst, DMA_FROM_DEVICE); - if (rc < 0) - goto err_free_src; - ctx->dst_nents = rc; - break; - default: - - /* If req->dst buffer len is not-positive, - * then skip setting up of DMA - */ - if (req->dst->length <= 0) { - ctx->dst_nents = 0; - return 0; - } - - if (encrypt) - payload_len = SPACC_MAX_IV_SIZE + req->cryptlen - + icvlen + req->assoclen; - else { - payload_len = req->cryptlen - tctx->auth_size + - req->assoclen; - if (payload_len <= 0) - return -EBADMSG; - } - - - rc = spacc_sg_to_ddt(dev, req->dst, - payload_len > 0 ? payload_len : 16, - &ctx->dst, DMA_FROM_DEVICE); - if (rc < 0) - goto err_free_src; - ctx->dst_nents = rc; - } - } - - return 0; - -err_free_src: - if (ctx->aead_nents) { - dma_unmap_sg(dev, iv->spacc_adata_sg, ctx->aead_nents, - DMA_TO_DEVICE); - - pdu_ddt_free(&ctx->src); - } - -err_free_iv: - kmem_cache_free(spacc_iv_pool, ctx->iv_buf); - - return rc; -} - -static void spacc_aead_cleanup_dma(struct device *dev, struct aead_request *req) -{ - struct spacc_crypto_reqctx *ctx = aead_request_ctx(req); - struct spacc_iv_buf *iv = ctx->iv_buf; - - if (req->src != req->dst && ctx->dst_nents > 0) { - dma_unmap_sg(dev, req->dst, ctx->dst_nents, - DMA_FROM_DEVICE); - pdu_ddt_free(&ctx->dst); - } - - if (ctx->aead_nents) { - dma_unmap_sg(dev, iv->spacc_adata_sg, ctx->aead_nents, - DMA_TO_DEVICE); - - pdu_ddt_free(&ctx->src); - } - - kmem_cache_free(spacc_iv_pool, ctx->iv_buf); -} - -static bool spacc_check_keylen(const struct spacc_alg *salg, - unsigned int keylen) -{ - unsigned int i, mask = salg->keylen_mask; - - if (mask > (1ul << ARRAY_SIZE(salg->mode->keylen)) - 1) - return false; - - for (i = 0; mask; i++, mask >>= 1) { - if (mask & 1 && salg->mode->keylen[i] == keylen) - return true; - } - - return false; -} - -static void spacc_aead_cb(void *spacc, void *tfm) -{ - struct aead_cb_data *cb = tfm; - int err = -1; - u32 status_reg = readl(cb->spacc->regmap + SPACC_REG_STATUS); - u32 status_ret = (status_reg >> 24) & 
0x3; - - dma_sync_sg_for_cpu(cb->tctx->dev, cb->req->dst, - cb->ctx->dst_nents, DMA_FROM_DEVICE); - - /* ICV mismatch send bad msg */ - if (status_ret == 0x1) { - err = -EBADMSG; - goto REQ_DST_CP_SKIP; - } - err = cb->spacc->job[cb->new_handle].job_err; - -REQ_DST_CP_SKIP: - spacc_aead_cleanup_dma(cb->tctx->dev, cb->req); - spacc_close(cb->spacc, cb->new_handle); - - /* call complete */ - aead_request_complete(cb->req, err); -} - -static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, - unsigned int keylen) -{ - struct spacc_crypto_ctx *ctx = crypto_aead_ctx(tfm); - const struct spacc_alg *salg = spacc_tfm_aead(&tfm->base); - struct crypto_authenc_keys authenc_keys; - struct spacc_priv *priv; - unsigned int authkeylen, enckeylen; - const unsigned char *authkey, *enckey; - unsigned char xcbc[64]; - int singlekey = 0; - int err; - - /* are keylens valid? */ - ctx->ctx_valid = false; - - switch (ctx->mode & 0xFF) { - case CRYPTO_MODE_SM4_GCM: - case CRYPTO_MODE_SM4_CCM: - case CRYPTO_MODE_NULL: - case CRYPTO_MODE_AES_GCM: - case CRYPTO_MODE_AES_CCM: - case CRYPTO_MODE_CHACHA20_POLY1305: - authkey = key; - authkeylen = 0; - enckey = key; - enckeylen = keylen; - ctx->keylen = keylen; - singlekey = 1; - goto skipover; - } - - err = crypto_authenc_extractkeys(&authenc_keys, key, keylen); - if (err) - return err; - - authkeylen = authenc_keys.authkeylen; - authkey = authenc_keys.authkey; - enckeylen = authenc_keys.enckeylen; - enckey = authenc_keys.enckey; - -skipover: - /* detect RFC3686/4106 and trim from enckeylen(and copy salt..) 
*/ - if (ctx->mode & SPACC_MANGLE_IV_FLAG) { - switch (ctx->mode & 0x7F00) { - case SPACC_MANGLE_IV_RFC3686: - case SPACC_MANGLE_IV_RFC4106: - case SPACC_MANGLE_IV_RFC4543: - memcpy(ctx->csalt, enckey + enckeylen - 4, 4); - enckeylen -= 4; - break; - case SPACC_MANGLE_IV_RFC4309: - memcpy(ctx->csalt, enckey + enckeylen - 3, 3); - enckeylen -= 3; - break; - } - } - - if (!singlekey) { - if (authkeylen > salg->mode->hashlen) { - dev_warn(ctx->dev, "Auth key size of %u is not valid\n", - authkeylen); - return -EINVAL; - } - } - - if (!spacc_check_keylen(salg, enckeylen)) { - dev_warn(ctx->dev, "Enc key size of %u is not valid\n", - enckeylen); - return -EINVAL; - } - - /* if we're already open close the handle since - * the size may have changed - */ - if (ctx->handle != -1) { - priv = dev_get_drvdata(ctx->dev); - spacc_close(&priv->spacc, ctx->handle); - put_device(ctx->dev); - ctx->handle = -1; - } - - /* Open a handle and - * search all devices for an open handle - */ - priv = NULL; - priv = dev_get_drvdata(salg->dev[0]); - - /* increase reference */ - ctx->dev = get_device(salg->dev[0]); - - /* check if its a valid mode ... 
*/ - if (spacc_isenabled(&priv->spacc, salg->mode->aead.ciph & 0xFF, - enckeylen) && - spacc_isenabled(&priv->spacc, - salg->mode->aead.hash & 0xFF, authkeylen)) { - /* try to open spacc handle */ - ctx->handle = spacc_open(&priv->spacc, - salg->mode->aead.ciph & 0xFF, - salg->mode->aead.hash & 0xFF, - -1, 0, spacc_aead_cb, tfm); - } - - if (ctx->handle < 0) { - put_device(salg->dev[0]); - pr_debug("Failed to open SPAcc context\n"); - return -EIO; - } - - /* setup XCBC key */ - if (salg->mode->aead.hash == CRYPTO_MODE_MAC_XCBC) { - err = spacc_compute_xcbc_key(&priv->spacc, - salg->mode->aead.hash, - ctx->handle, authkey, - authkeylen, xcbc); - if (err < 0) { - dev_warn(ctx->dev, "Failed to compute XCBC key: %d\n", - err); - return -EIO; - } - authkey = xcbc; - authkeylen = 48; - } - - /* handle zero key/zero len DEC condition for SM4/AES GCM mode */ - ctx->zero_key = 0; - if (!key[0]) { - int i, val = 0; - - for (i = 0; i < keylen ; i++) - val += key[i]; - - if (val == 0) - ctx->zero_key = 1; - } - - err = spacc_write_context(&priv->spacc, ctx->handle, - SPACC_CRYPTO_OPERATION, enckey, - enckeylen, NULL, 0); - - if (err) { - dev_warn(ctx->dev, - "Could not write ciphering context: %d\n", err); - return -EIO; - } - - if (!singlekey) { - err = spacc_write_context(&priv->spacc, ctx->handle, - SPACC_HASH_OPERATION, authkey, - authkeylen, NULL, 0); - if (err) { - dev_warn(ctx->dev, - "Could not write hashing context: %d\n", err); - return -EIO; - } - } - - /* set expand key */ - spacc_set_key_exp(&priv->spacc, ctx->handle); - ctx->ctx_valid = true; - - memset(xcbc, 0, sizeof(xcbc)); - - /* copy key to ctx for fallback */ - memcpy(ctx->key, key, keylen); - - return 0; -} - -static int spacc_aead_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - struct spacc_crypto_ctx *ctx = crypto_aead_ctx(tfm); - - ctx->auth_size = authsize; - - /* taken from crypto/ccm.c */ - switch (ctx->mode) { - case CRYPTO_MODE_SM4_GCM: - case CRYPTO_MODE_AES_GCM: - switch 
(authsize) { - case 4: - case 8: - case 12: - case 13: - case 14: - case 15: - case 16: - break; - default: - return -EINVAL; - } - break; - - case CRYPTO_MODE_AES_CCM: - case CRYPTO_MODE_SM4_CCM: - switch (authsize) { - case 4: - case 6: - case 8: - case 10: - case 12: - case 14: - case 16: - break; - default: - return -EINVAL; - } - break; - - case CRYPTO_MODE_CHACHA20_POLY1305: - switch (authsize) { - case 16: - break; - default: - return -EINVAL; - } - break; - } - - return 0; -} - -static int spacc_aead_fallback(struct aead_request *req, - struct spacc_crypto_ctx *ctx, int encrypt) -{ - int ret; - struct aead_request *subreq = aead_request_ctx(req); - struct crypto_aead *reqtfm = crypto_aead_reqtfm(req); - struct aead_alg *alg = crypto_aead_alg(reqtfm); - const char *aead_name = alg->base.cra_name; - - ctx->fb.aead = crypto_alloc_aead(aead_name, 0, - CRYPTO_ALG_NEED_FALLBACK | - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->fb.aead)) { - pr_err("Spacc aead fallback tfm is NULL!\n"); - return PTR_ERR(ctx->fb.aead); - } - - subreq = aead_request_alloc(ctx->fb.aead, GFP_KERNEL); - if (!subreq) - return -ENOMEM; - - crypto_aead_setkey(ctx->fb.aead, ctx->key, ctx->keylen); - crypto_aead_setauthsize(ctx->fb.aead, ctx->auth_size); - - aead_request_set_tfm(subreq, ctx->fb.aead); - aead_request_set_callback(subreq, req->base.flags, - req->base.complete, req->base.data); - aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, - req->iv); - aead_request_set_ad(subreq, req->assoclen); - - if (encrypt) - ret = crypto_aead_encrypt(subreq); - else - ret = crypto_aead_decrypt(subreq); - - aead_request_free(subreq); - crypto_free_aead(ctx->fb.aead); - ctx->fb.aead = NULL; - - return ret; -} - -static int spacc_aead_process(struct aead_request *req, u64 seq, int encrypt) -{ - int rc; - int B0len; - int alen; - u32 dstoff; - int icvremove; - int ivaadsize; - int ptaadsize = 0; - int iv_to_context; - int spacc_proc_len; - u32 spacc_icv_offset = 0; - int spacc_pre_aad_size; - 
int ccm_aad_16b_len; - struct crypto_aead *reqtfm = crypto_aead_reqtfm(req); - int ivsize = crypto_aead_ivsize(reqtfm); - struct spacc_crypto_ctx *tctx = crypto_aead_ctx(reqtfm); - struct spacc_crypto_reqctx *ctx = aead_request_ctx(req); - struct spacc_priv *priv = dev_get_drvdata(tctx->dev); - - ctx->encrypt_op = encrypt; - alen = 0; - ccm_aad_16b_len = 0; - - if (tctx->handle < 0 || !tctx->ctx_valid || (req->cryptlen + - req->assoclen) > priv->max_msg_len) - return -EINVAL; - - /* IV is programmed to context by default */ - iv_to_context = SET_IV_IN_CONTEXT; - - if (encrypt) { - switch (tctx->mode & 0xFF) { - case CRYPTO_MODE_AES_GCM: - case CRYPTO_MODE_SM4_GCM: - case CRYPTO_MODE_CHACHA20_POLY1305: - /* For cryptlen = 0 */ - if (req->cryptlen + req->assoclen == 0) - return spacc_aead_fallback(req, tctx, encrypt); - break; - case CRYPTO_MODE_AES_CCM: - case CRYPTO_MODE_SM4_CCM: - - if (req->cryptlen + req->assoclen == 0) - return spacc_aead_fallback(req, tctx, encrypt); - - /* verify that msglen can in fact be represented - * in L bytes - */ - /* 2 <= L <= 8, so 1 <= L' <= 7. */ - if (req->iv[0] < 1 || req->iv[0] > 7) - return -EINVAL; - - break; - default: - pr_debug("Unsupported algo"); - return -EINVAL; - } - } else { - /* Handle the decryption */ - switch (tctx->mode & 0xFF) { - case CRYPTO_MODE_AES_GCM: - case CRYPTO_MODE_SM4_GCM: - case CRYPTO_MODE_CHACHA20_POLY1305: - /* For assoclen = 0 */ - if (req->assoclen == 0 && - (req->cryptlen - tctx->auth_size == 0)) - return spacc_aead_fallback(req, tctx, encrypt); - break; - case CRYPTO_MODE_AES_CCM: - case CRYPTO_MODE_SM4_CCM: - - if (req->assoclen == 0 && - (req->cryptlen - tctx->auth_size == 0)) - return spacc_aead_fallback(req, tctx, encrypt); - /* 2 <= L <= 8, so 1 <= L' <= 7. */ - if (req->iv[0] < 1 || req->iv[0] > 7) - return -EINVAL; - break; - default: - pr_debug("Unsupported algo"); - return -EINVAL; - } - } - - icvremove = (encrypt) ? 
0 : tctx->auth_size; - - rc = spacc_aead_init_dma(tctx->dev, req, seq, (encrypt) ? - tctx->auth_size : 0, encrypt, &alen); - if (rc < 0) - return -EINVAL; - - if (req->assoclen) - ccm_aad_16b_len = ccm_16byte_aligned_len(req->assoclen + alen); - - /* Note: This won't work if IV_IMPORT has been disabled */ - ctx->cb.new_handle = spacc_clone_handle(&priv->spacc, tctx->handle, - &ctx->cb); - if (ctx->cb.new_handle < 0) { - spacc_aead_cleanup_dma(tctx->dev, req); - return -EINVAL; - } - - ctx->cb.tctx = tctx; - ctx->cb.ctx = ctx; - ctx->cb.req = req; - ctx->cb.spacc = &priv->spacc; - - /* Write IV to the spacc-context - * IV can be written to context or as part of the input src buffer - * IV in case of CCM is going in the input src buff. - * IV for GCM is written to the context. - */ - if (tctx->mode == CRYPTO_MODE_AES_GCM_RFC4106 || - tctx->mode == CRYPTO_MODE_AES_GCM || - tctx->mode == CRYPTO_MODE_SM4_GCM_RFC8998 || - tctx->mode == CRYPTO_MODE_CHACHA20_POLY1305 || - tctx->mode == CRYPTO_MODE_NULL) { - iv_to_context = SET_IV_IN_CONTEXT; - rc = spacc_write_context(&priv->spacc, ctx->cb.new_handle, - SPACC_CRYPTO_OPERATION, NULL, 0, - req->iv, ivsize); - - if (rc < 0) { - spacc_aead_cleanup_dma(tctx->dev, req); - spacc_close(&priv->spacc, ctx->cb.new_handle); - return -EINVAL; - } - } - - /* CCM and GCM don't include the IV in the AAD */ - if (tctx->mode == CRYPTO_MODE_AES_GCM_RFC4106 || - tctx->mode == CRYPTO_MODE_AES_CCM_RFC4309 || - tctx->mode == CRYPTO_MODE_AES_GCM || - tctx->mode == CRYPTO_MODE_AES_CCM || - tctx->mode == CRYPTO_MODE_SM4_CCM || - tctx->mode == CRYPTO_MODE_SM4_GCM_RFC8998 || - tctx->mode == CRYPTO_MODE_CHACHA20_POLY1305 || - tctx->mode == CRYPTO_MODE_NULL) { - ivaadsize = 0; - } else { - ivaadsize = ivsize; - } - - /* CCM requires an extra block of AAD */ - if (tctx->mode == CRYPTO_MODE_AES_CCM_RFC4309 || - tctx->mode == CRYPTO_MODE_AES_CCM || - tctx->mode == CRYPTO_MODE_SM4_CCM) - B0len = SPACC_B0_SIZE; - else - B0len = 0; - - /* GMAC mode uses AAD 
for the entire message. - * So does NULL cipher - */ - if (tctx->mode == CRYPTO_MODE_AES_GCM_RFC4543 || - tctx->mode == CRYPTO_MODE_NULL) { - if (req->cryptlen >= icvremove) - ptaadsize = req->cryptlen - icvremove; - } - - /* Calculate and set the below, important parameters - * spacc icv offset - spacc_icv_offset - * destination offset - dstoff - * IV to context - This is set for CCM, not set for GCM - */ - if (req->dst == req->src) { - dstoff = ((uint32_t)(SPACC_MAX_IV_SIZE + B0len + - req->assoclen + ivaadsize)); - - /* CCM case */ - if (tctx->mode == CRYPTO_MODE_AES_CCM_RFC4309 || - tctx->mode == CRYPTO_MODE_AES_CCM || - tctx->mode == CRYPTO_MODE_SM4_CCM) { - iv_to_context = SET_IV_IN_SRCBUF; - dstoff = ((uint32_t)(SPACC_MAX_IV_SIZE + B0len + - ccm_aad_16b_len + ivaadsize)); - } - - } else { - dstoff = ((uint32_t)(req->assoclen + ivaadsize)); - - /* CCM case */ - if (tctx->mode == CRYPTO_MODE_AES_CCM_RFC4309 || - tctx->mode == CRYPTO_MODE_AES_CCM || - tctx->mode == CRYPTO_MODE_SM4_CCM) { - iv_to_context = SET_IV_IN_SRCBUF; - dstoff = ((uint32_t)(req->assoclen + ivaadsize)); - - } - } - - /* Calculate and set the below, important parameters - * spacc proc_len - spacc_proc_len - * pre-AAD size - spacc_pre_aad_size - */ - if (tctx->mode == CRYPTO_MODE_AES_CCM || - tctx->mode == CRYPTO_MODE_SM4_CCM || - tctx->mode == CRYPTO_MODE_AES_CCM_RFC4309 || - tctx->mode == CRYPTO_MODE_SM4_CCM_RFC8998) { - spacc_proc_len = B0len + ccm_aad_16b_len - + req->cryptlen + ivaadsize - - icvremove; - spacc_pre_aad_size = B0len + ccm_aad_16b_len - + ivaadsize + ptaadsize; - - } else { - spacc_proc_len = B0len + req->assoclen - + req->cryptlen - icvremove - + ivaadsize; - spacc_pre_aad_size = B0len + req->assoclen - + ivaadsize + ptaadsize; - } - - rc = spacc_set_operation(&priv->spacc, - ctx->cb.new_handle, - encrypt ? 
OP_ENCRYPT : OP_DECRYPT, - ICV_ENCRYPT_HASH, IP_ICV_APPEND, - spacc_icv_offset, - tctx->auth_size, 0); - - rc = spacc_packet_enqueue_ddt(&priv->spacc, ctx->cb.new_handle, - &ctx->src, - (req->dst == req->src) ? &ctx->src : - &ctx->dst, spacc_proc_len, - (dstoff << SPACC_OFFSET_DST_O) | - SPACC_MAX_IV_SIZE, - spacc_pre_aad_size, - 0, iv_to_context, 0); - - if (rc < 0) { - spacc_aead_cleanup_dma(tctx->dev, req); - spacc_close(&priv->spacc, ctx->cb.new_handle); - - if (rc != -EBUSY) { - dev_err(tctx->dev, " failed to enqueue job, ERR: %d\n", - rc); - } - - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; - - return -EINVAL; - } - - /* At this point the job is in flight to the engine ... remove first use - * so subsequent calls don't expand the key again... ideally we would - * pump a dummy job through the engine to pre-expand the key so that by - * the time setkey was done we wouldn't have to do this - */ - priv->spacc.job[tctx->handle].first_use = 0; - priv->spacc.job[tctx->handle].ctrl &= ~(1UL - << priv->spacc.config.ctrl_map[SPACC_CTRL_KEY_EXP]); - - return -EINPROGRESS; -} - -static int spacc_aead_encrypt(struct aead_request *req) -{ - return spacc_aead_process(req, 0ULL, 1); -} - -static int spacc_aead_decrypt(struct aead_request *req) -{ - return spacc_aead_process(req, 0ULL, 0); -} - -static int spacc_aead_init(struct crypto_aead *tfm) -{ - struct spacc_crypto_ctx *ctx = crypto_aead_ctx(tfm); - const struct spacc_alg *salg = spacc_tfm_aead(&tfm->base); - - crypto_aead_set_reqsize(tfm, sizeof(struct spacc_crypto_reqctx)); - - ctx->zero_key = 0; - ctx->fb.aead = NULL; - ctx->handle = -1; - ctx->mode = salg->mode->aead.ciph; - ctx->dev = get_device(salg->dev[0]); - - return 0; -} - -static void spacc_aead_exit(struct crypto_aead *tfm) -{ - struct spacc_crypto_ctx *ctx = crypto_aead_ctx(tfm); - struct spacc_priv *priv = dev_get_drvdata(ctx->dev); - - ctx->fb.aead = NULL; - /* close spacc handle */ - if (ctx->handle >= 0) { - 
spacc_close(&priv->spacc, ctx->handle); - ctx->handle = -1; - } - - put_device(ctx->dev); -} - -static struct aead_alg spacc_aead_algs = { - .setkey = spacc_aead_setkey, - .setauthsize = spacc_aead_setauthsize, - .encrypt = spacc_aead_encrypt, - .decrypt = spacc_aead_decrypt, - .init = spacc_aead_init, - .exit = spacc_aead_exit, - - .base.cra_priority = 300, - .base.cra_module = THIS_MODULE, - .base.cra_ctxsize = sizeof(struct spacc_crypto_ctx), - .base.cra_flags = CRYPTO_ALG_TYPE_AEAD - | CRYPTO_ALG_ASYNC - | CRYPTO_ALG_NEED_FALLBACK - | CRYPTO_ALG_KERN_DRIVER_ONLY - | CRYPTO_ALG_OPTIONAL_KEY -}; - -static int spacc_register_aead(unsigned int aead_mode, - struct platform_device *spacc_pdev) -{ - int rc; - struct spacc_alg *salg; - - salg = kmalloc(sizeof(*salg), GFP_KERNEL); - if (!salg) - return -ENOMEM; - - salg->mode = &possible_aeads[aead_mode]; - salg->dev[0] = &spacc_pdev->dev; - salg->dev[1] = NULL; - salg->calg = &salg->alg.aead.base; - salg->alg.aead = spacc_aead_algs; - - spacc_init_aead_alg(salg->calg, salg->mode); - - salg->alg.aead.ivsize = salg->mode->ivlen; - salg->alg.aead.maxauthsize = salg->mode->hashlen; - salg->alg.aead.base.cra_blocksize = salg->mode->blocklen; - - salg->keylen_mask = possible_aeads[aead_mode].keylen_mask; - - if (salg->mode->aead.ciph & SPACC_MANGLE_IV_FLAG) { - switch (salg->mode->aead.ciph & 0x7F00) { - case SPACC_MANGLE_IV_RFC3686: /*CTR*/ - case SPACC_MANGLE_IV_RFC4106: /*GCM*/ - case SPACC_MANGLE_IV_RFC4543: /*GMAC*/ - case SPACC_MANGLE_IV_RFC4309: /*CCM*/ - case SPACC_MANGLE_IV_RFC8998: /*GCM/CCM*/ - salg->alg.aead.ivsize = 12; - break; - } - } - - rc = crypto_register_aead(&salg->alg.aead); - if (rc < 0) { - kfree(salg); - return rc; - } - - dev_dbg(salg->dev[0], "Registered %s\n", salg->mode->name); - - mutex_lock(&spacc_aead_alg_mutex); - list_add(&salg->list, &spacc_aead_alg_list); - mutex_unlock(&spacc_aead_alg_mutex); - - return 0; -} - -int probe_aeads(struct platform_device *spacc_pdev) -{ - int err; - unsigned 
int x, y; - struct spacc_priv *priv = NULL; - - size_t alloc_size = max_t(unsigned long, - roundup_pow_of_two(sizeof(struct spacc_iv_buf)), - dma_get_cache_alignment()); - - spacc_iv_pool = kmem_cache_create("spacc-aead-iv", alloc_size, - alloc_size, 0, NULL); - - if (!spacc_iv_pool) - return -ENOMEM; - - for (x = 0; x < ARRAY_SIZE(possible_aeads); x++) { - possible_aeads[x].keylen_mask = 0; - possible_aeads[x].valid = 0; - } - - /* compute cipher key masks (over all devices) */ - priv = dev_get_drvdata(&spacc_pdev->dev); - - for (x = 0; x < ARRAY_SIZE(possible_aeads); x++) { - for (y = 0; y < ARRAY_SIZE(possible_aeads[x].keylen); y++) { - if (spacc_isenabled(&priv->spacc, - possible_aeads[x].aead.ciph & 0xFF, - possible_aeads[x].keylen[y])) - possible_aeads[x].keylen_mask |= 1u << y; - } - } - - /* scan for combined modes */ - priv = dev_get_drvdata(&spacc_pdev->dev); - - for (x = 0; x < ARRAY_SIZE(possible_aeads); x++) { - if (!possible_aeads[x].valid && possible_aeads[x].keylen_mask) { - if (spacc_isenabled(&priv->spacc, - possible_aeads[x].aead.hash & 0xFF, - possible_aeads[x].hashlen)) { - - possible_aeads[x].valid = 1; - err = spacc_register_aead(x, spacc_pdev); - if (err < 0) - goto error; - } - } - } - - return 0; - -error: - return err; -} - -int spacc_unregister_aead_algs(void) -{ - struct spacc_alg *salg, *tmp; - - mutex_lock(&spacc_aead_alg_mutex); - - list_for_each_entry_safe(salg, tmp, &spacc_aead_alg_list, list) { - crypto_unregister_alg(salg->calg); - list_del(&salg->list); - kfree(salg); - } - - mutex_unlock(&spacc_aead_alg_mutex); - - kmem_cache_destroy(spacc_iv_pool); - - return 0; -} diff --git a/drivers/crypto/dwc-spacc/spacc_ahash.c b/drivers/crypto/dwc-spacc/spacc_ahash.c deleted file mode 100644 index ed63855d4931..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_ahash.c +++ /dev/null @@ -1,914 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include 
-#include - -#include "spacc_device.h" -#include "spacc_core.h" - -#define PPP_BUF_SIZE 128 - -struct sdesc { - struct shash_desc shash; - char ctx[]; -}; - -static struct dma_pool *spacc_hash_pool; -static LIST_HEAD(spacc_hash_alg_list); -static LIST_HEAD(head_sglbuf); -static DEFINE_MUTEX(spacc_hash_alg_mutex); - -static struct mode_tab possible_hashes[] = { - { .keylen[0] = 16, MODE_TAB_HASH("cmac(aes)", MAC_CMAC, 16, 16), - .sw_fb = true }, - { .keylen[0] = 48 | MODE_TAB_HASH_XCBC, MODE_TAB_HASH("xcbc(aes)", - MAC_XCBC, 16, 16), .sw_fb = true }, - - { MODE_TAB_HASH("cmac(sm4)", MAC_SM4_CMAC, 16, 16), .sw_fb = true }, - { .keylen[0] = 32 | MODE_TAB_HASH_XCBC, MODE_TAB_HASH("xcbc(sm4)", - MAC_SM4_XCBC, 16, 16), .sw_fb = true }, - - { MODE_TAB_HASH("hmac(md5)", HMAC_MD5, MD5_DIGEST_SIZE, - MD5_HMAC_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("md5", HASH_MD5, MD5_DIGEST_SIZE, - MD5_HMAC_BLOCK_SIZE), .sw_fb = true }, - - { MODE_TAB_HASH("hmac(sha1)", HMAC_SHA1, SHA1_DIGEST_SIZE, - SHA1_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("sha1", HASH_SHA1, SHA1_DIGEST_SIZE, - SHA1_BLOCK_SIZE), .sw_fb = true }, - - { MODE_TAB_HASH("sha224", HASH_SHA224, SHA224_DIGEST_SIZE, - SHA224_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("sha256", HASH_SHA256, SHA256_DIGEST_SIZE, - SHA256_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("sha384", HASH_SHA384, SHA384_DIGEST_SIZE, - SHA384_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("sha512", HASH_SHA512, SHA512_DIGEST_SIZE, - SHA512_BLOCK_SIZE), .sw_fb = true }, - - { MODE_TAB_HASH("hmac(sha512)", HMAC_SHA512, SHA512_DIGEST_SIZE, - SHA512_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("hmac(sha224)", HMAC_SHA224, SHA224_DIGEST_SIZE, - SHA224_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("hmac(sha256)", HMAC_SHA256, SHA256_DIGEST_SIZE, - SHA256_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("hmac(sha384)", HMAC_SHA384, SHA384_DIGEST_SIZE, - SHA384_BLOCK_SIZE), .sw_fb = true }, - - { MODE_TAB_HASH("sha3-224", 
HASH_SHA3_224, SHA3_224_DIGEST_SIZE, - SHA3_224_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("sha3-256", HASH_SHA3_256, SHA3_256_DIGEST_SIZE, - SHA3_256_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("sha3-384", HASH_SHA3_384, SHA3_384_DIGEST_SIZE, - SHA3_384_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("sha3-512", HASH_SHA3_512, SHA3_512_DIGEST_SIZE, - SHA3_512_BLOCK_SIZE), .sw_fb = true }, - - { MODE_TAB_HASH("hmac(sm3)", HMAC_SM3, SM3_DIGEST_SIZE, - SM3_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("sm3", HASH_SM3, SM3_DIGEST_SIZE, - SM3_BLOCK_SIZE), .sw_fb = true }, - { MODE_TAB_HASH("michael_mic", MAC_MICHAEL, 8, 8), .sw_fb = true }, -}; - -static void spacc_hash_cleanup_dma_dst(struct spacc_crypto_ctx *tctx, - struct ahash_request *req) -{ - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - - pdu_ddt_free(&ctx->dst); -} - -static void spacc_hash_cleanup_dma_src(struct spacc_crypto_ctx *tctx, - struct ahash_request *req) -{ - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - - if (tctx->tmp_sgl && tctx->tmp_sgl[0].length != 0) { - dma_unmap_sg(tctx->dev, tctx->tmp_sgl, ctx->src_nents, - DMA_TO_DEVICE); - kfree(tctx->tmp_sgl_buff); - tctx->tmp_sgl_buff = NULL; - tctx->tmp_sgl[0].length = 0; - } else { - dma_unmap_sg(tctx->dev, req->src, ctx->src_nents, - DMA_TO_DEVICE); - } - - pdu_ddt_free(&ctx->src); -} - -static void spacc_hash_cleanup_dma(struct device *dev, - struct ahash_request *req) -{ - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - - dma_unmap_sg(dev, req->src, ctx->src_nents, DMA_TO_DEVICE); - pdu_ddt_free(&ctx->src); - - dma_pool_free(spacc_hash_pool, ctx->digest_buf, ctx->digest_dma); - pdu_ddt_free(&ctx->dst); -} - -static void spacc_init_calg(struct crypto_alg *calg, - const struct mode_tab *mode) -{ - - strscpy(calg->cra_name, mode->name); - calg->cra_name[sizeof(mode->name) - 1] = '\0'; - - strscpy(calg->cra_driver_name, "spacc-"); - strcat(calg->cra_driver_name, mode->name); - 
calg->cra_driver_name[sizeof(calg->cra_driver_name) - 1] = '\0'; - - calg->cra_blocksize = mode->blocklen; -} - -static int spacc_ctx_clone_handle(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(tfm); - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - struct spacc_priv *priv = dev_get_drvdata(tctx->dev); - - if (tctx->handle < 0) - return -ENXIO; - - ctx->acb.new_handle = spacc_clone_handle(&priv->spacc, tctx->handle, - &ctx->acb); - - if (ctx->acb.new_handle < 0) { - spacc_hash_cleanup_dma(tctx->dev, req); - return -ENOMEM; - } - - ctx->acb.tctx = tctx; - ctx->acb.ctx = ctx; - ctx->acb.req = req; - ctx->acb.spacc = &priv->spacc; - - return 0; -} - -static int spacc_hash_init_dma(struct device *dev, struct ahash_request *req, - int final) -{ - int rc = -1; - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(tfm); - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - - gfp_t mflags = GFP_ATOMIC; - - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) - mflags = GFP_KERNEL; - - ctx->digest_buf = dma_pool_alloc(spacc_hash_pool, mflags, - &ctx->digest_dma); - - if (!ctx->digest_buf) - return -ENOMEM; - - rc = pdu_ddt_init(&ctx->dst, 1 | 0x80000000); - if (rc < 0) { - pr_err("ERR: PDU DDT init error\n"); - rc = -EIO; - goto err_free_digest; - } - - pdu_ddt_add(&ctx->dst, ctx->digest_dma, SPACC_MAX_DIGEST_SIZE); - - if (ctx->total_nents > 0 && ctx->single_shot) { - /* single shot */ - spacc_ctx_clone_handle(req); - - if (req->nbytes) { - rc = spacc_sg_to_ddt(dev, req->src, req->nbytes, - &ctx->src, DMA_TO_DEVICE); - } else { - memset(tctx->tmp_buffer, '\0', PPP_BUF_SIZE); - sg_set_buf(&(tctx->tmp_sgl[0]), tctx->tmp_buffer, - PPP_BUF_SIZE); - rc = spacc_sg_to_ddt(dev, &(tctx->tmp_sgl[0]), - tctx->tmp_sgl[0].length, - &ctx->src, DMA_TO_DEVICE); - - } - } else if (ctx->total_nents == 0 && req->nbytes == 0) { - 
spacc_ctx_clone_handle(req); - - /* zero length case */ - memset(tctx->tmp_buffer, '\0', PPP_BUF_SIZE); - sg_set_buf(&(tctx->tmp_sgl[0]), tctx->tmp_buffer, PPP_BUF_SIZE); - rc = spacc_sg_to_ddt(dev, &(tctx->tmp_sgl[0]), - tctx->tmp_sgl[0].length, - &ctx->src, DMA_TO_DEVICE); - - } - - if (rc < 0) - goto err_free_dst; - - ctx->src_nents = rc; - - return rc; - -err_free_dst: - pdu_ddt_free(&ctx->dst); -err_free_digest: - dma_pool_free(spacc_hash_pool, ctx->digest_buf, ctx->digest_dma); - - return rc; -} - -static void spacc_free_mems(struct spacc_crypto_reqctx *ctx, - struct spacc_crypto_ctx *tctx, - struct ahash_request *req) -{ - spacc_hash_cleanup_dma_dst(tctx, req); - spacc_hash_cleanup_dma_src(tctx, req); - - if (ctx->single_shot) { - kfree(tctx->tmp_sgl); - tctx->tmp_sgl = NULL; - - ctx->single_shot = 0; - if (ctx->total_nents) - ctx->total_nents = 0; - } -} - -static void spacc_digest_cb(void *spacc, void *tfm) -{ - struct ahash_cb_data *cb = tfm; - int err = -1; - int dig_sz; - - dig_sz = crypto_ahash_digestsize(crypto_ahash_reqtfm(cb->req)); - - if (cb->ctx->single_shot) - memcpy(cb->req->result, cb->ctx->digest_buf, dig_sz); - else - memcpy(cb->tctx->digest_ctx_buf, cb->ctx->digest_buf, dig_sz); - - err = cb->spacc->job[cb->new_handle].job_err; - - dma_pool_free(spacc_hash_pool, cb->ctx->digest_buf, - cb->ctx->digest_dma); - spacc_free_mems(cb->ctx, cb->tctx, cb->req); - spacc_close(cb->spacc, cb->new_handle); - - if (cb->req->base.complete) - ahash_request_complete(cb->req, err); -} - -static int do_shash(unsigned char *name, unsigned char *result, - const u8 *data1, unsigned int data1_len, - const u8 *data2, unsigned int data2_len, - const u8 *key, unsigned int key_len) -{ - int rc; - unsigned int size; - struct crypto_shash *hash; - struct sdesc *sdesc; - - hash = crypto_alloc_shash(name, 0, 0); - if (IS_ERR(hash)) { - rc = PTR_ERR(hash); - pr_err("ERR: Crypto %s allocation error %d\n", name, rc); - return rc; - } - - size = sizeof(struct shash_desc) + 
crypto_shash_descsize(hash); - sdesc = kmalloc(size, GFP_KERNEL); - if (!sdesc) { - rc = -ENOMEM; - goto do_shash_err; - } - sdesc->shash.tfm = hash; - - if (key_len > 0) { - rc = crypto_shash_setkey(hash, key, key_len); - if (rc) { - pr_err("ERR: Could not setkey %s shash\n", name); - goto do_shash_err; - } - } - - rc = crypto_shash_init(&sdesc->shash); - if (rc) { - pr_err("ERR: Could not init %s shash\n", name); - goto do_shash_err; - } - - rc = crypto_shash_update(&sdesc->shash, data1, data1_len); - if (rc) { - pr_err("ERR: Could not update1\n"); - goto do_shash_err; - } - - if (data2 && data2_len) { - rc = crypto_shash_update(&sdesc->shash, data2, data2_len); - if (rc) { - pr_err("ERR: Could not update2\n"); - goto do_shash_err; - } - } - - rc = crypto_shash_final(&sdesc->shash, result); - if (rc) - pr_err("ERR: Could not generate %s hash\n", name); - -do_shash_err: - crypto_free_shash(hash); - kfree(sdesc); - - return rc; -} - -static int spacc_hash_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - int rc; - const struct spacc_alg *salg = spacc_tfm_ahash(&tfm->base); - struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(tfm); - struct spacc_priv *priv = dev_get_drvdata(tctx->dev); - unsigned int digest_size, block_size; - char hash_alg[CRYPTO_MAX_ALG_NAME]; - - block_size = crypto_tfm_alg_blocksize(&tfm->base); - digest_size = crypto_ahash_digestsize(tfm); - - /* - * We will not use the hardware in case of HMACs - * This was meant for hashes but it works for cmac/xcbc since we - * only intend to support 128-bit keys... - */ - if (keylen > block_size && salg->mode->id != CRYPTO_MODE_MAC_CMAC) { - pr_debug("Exceeds keylen: %u\n", keylen); - pr_debug("Req. 
keylen hashing %s\n", - salg->calg->cra_name); - - memset(hash_alg, 0x00, CRYPTO_MAX_ALG_NAME); - switch (salg->mode->id) { - case CRYPTO_MODE_HMAC_SHA224: - rc = do_shash("sha224", tctx->ipad, key, keylen, - NULL, 0, NULL, 0); - break; - - case CRYPTO_MODE_HMAC_SHA256: - rc = do_shash("sha256", tctx->ipad, key, keylen, - NULL, 0, NULL, 0); - break; - - case CRYPTO_MODE_HMAC_SHA384: - rc = do_shash("sha384", tctx->ipad, key, keylen, - NULL, 0, NULL, 0); - break; - - case CRYPTO_MODE_HMAC_SHA512: - rc = do_shash("sha512", tctx->ipad, key, keylen, - NULL, 0, NULL, 0); - break; - - case CRYPTO_MODE_HMAC_MD5: - rc = do_shash("md5", tctx->ipad, key, keylen, - NULL, 0, NULL, 0); - break; - - case CRYPTO_MODE_HMAC_SHA1: - rc = do_shash("sha1", tctx->ipad, key, keylen, - NULL, 0, NULL, 0); - break; - - default: - return -EINVAL; - } - - if (rc < 0) { - pr_err("ERR: %d computing shash for %s\n", - rc, hash_alg); - return -EIO; - } - - keylen = digest_size; - pr_debug("updated keylen: %u\n", keylen); - - tctx->ctx_valid = false; - - if (salg->mode->sw_fb) { - rc = crypto_ahash_setkey(tctx->fb.hash, - tctx->ipad, keylen); - if (rc < 0) - return rc; - } - } else { - memcpy(tctx->ipad, key, keylen); - tctx->ctx_valid = false; - - if (salg->mode->sw_fb) { - rc = crypto_ahash_setkey(tctx->fb.hash, key, keylen); - if (rc < 0) - return rc; - } - } - - /* close handle since key size may have changed */ - if (tctx->handle >= 0) { - spacc_close(&priv->spacc, tctx->handle); - put_device(tctx->dev); - tctx->handle = -1; - tctx->dev = NULL; - } - - priv = NULL; - priv = dev_get_drvdata(salg->dev[0]); - tctx->dev = get_device(salg->dev[0]); - if (spacc_isenabled(&priv->spacc, salg->mode->id, keylen)) { - tctx->handle = spacc_open(&priv->spacc, - CRYPTO_MODE_NULL, - salg->mode->id, -1, - 0, spacc_digest_cb, tfm); - - } else - pr_debug(" Keylen: %d not enabled for algo: %d", - keylen, salg->mode->id); - - if (tctx->handle < 0) { - pr_err("ERR: Failed to open SPAcc context\n"); - 
put_device(salg->dev[0]); - return -EIO; - } - - rc = spacc_set_operation(&priv->spacc, tctx->handle, OP_ENCRYPT, - ICV_HASH, IP_ICV_OFFSET, 0, 0, 0); - if (rc < 0) { - spacc_close(&priv->spacc, tctx->handle); - tctx->handle = -1; - put_device(tctx->dev); - return -EIO; - } - - if (salg->mode->id == CRYPTO_MODE_MAC_XCBC || - salg->mode->id == CRYPTO_MODE_MAC_SM4_XCBC) { - rc = spacc_compute_xcbc_key(&priv->spacc, salg->mode->id, - tctx->handle, tctx->ipad, - keylen, tctx->ipad); - if (rc < 0) { - dev_warn(tctx->dev, - "Failed to compute XCBC key: %d\n", rc); - return -EIO; - } - rc = spacc_write_context(&priv->spacc, tctx->handle, - SPACC_HASH_OPERATION, tctx->ipad, - 32 + keylen, NULL, 0); - } else { - rc = spacc_write_context(&priv->spacc, tctx->handle, - SPACC_HASH_OPERATION, tctx->ipad, - keylen, NULL, 0); - } - - memset(tctx->ipad, 0, sizeof(tctx->ipad)); - if (rc < 0) { - pr_err("ERR: Failed to write SPAcc context\n"); - /* Non-fatal; we continue with the software fallback. */ - return 0; - } - - tctx->ctx_valid = true; - - return 0; -} - -static int spacc_set_statesize(struct spacc_alg *salg) -{ - unsigned int statesize = 0; - - switch (salg->mode->id) { - case CRYPTO_MODE_HMAC_SHA1: - case CRYPTO_MODE_HASH_SHA1: - statesize = sizeof(struct sha1_state); - break; - case CRYPTO_MODE_MAC_CMAC: - case CRYPTO_MODE_MAC_XCBC: - statesize = sizeof(struct crypto_aes_ctx); - break; - case CRYPTO_MODE_MAC_SM4_CMAC: - case CRYPTO_MODE_MAC_SM4_XCBC: - statesize = sizeof(struct sm4_ctx); - break; - case CRYPTO_MODE_HMAC_MD5: - case CRYPTO_MODE_HASH_MD5: - statesize = sizeof(struct md5_state); - break; - case CRYPTO_MODE_HASH_SHA224: - case CRYPTO_MODE_HASH_SHA256: - case CRYPTO_MODE_HMAC_SHA224: - case CRYPTO_MODE_HMAC_SHA256: - statesize = sizeof(struct sha256_state); - break; - case CRYPTO_MODE_HMAC_SHA512: - case CRYPTO_MODE_HASH_SHA512: - statesize = sizeof(struct sha512_state); - break; - case CRYPTO_MODE_HMAC_SHA384: - case CRYPTO_MODE_HASH_SHA384: - statesize = 
sizeof(struct spacc_crypto_reqctx); - break; - case CRYPTO_MODE_HASH_SHA3_224: - case CRYPTO_MODE_HASH_SHA3_256: - case CRYPTO_MODE_HASH_SHA3_384: - case CRYPTO_MODE_HASH_SHA3_512: - statesize = sizeof(struct sha3_state); - break; - case CRYPTO_MODE_HMAC_SM3: - case CRYPTO_MODE_MAC_MICHAEL: - statesize = sizeof(struct spacc_crypto_reqctx); - break; - default: - break; - } - - return statesize; -} - -static int spacc_hash_cra_init(struct crypto_tfm *tfm) -{ - const struct spacc_alg *salg = spacc_tfm_ahash(tfm); - struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm); - struct spacc_priv *priv = NULL; - - tctx->handle = -1; - tctx->ctx_valid = false; - tctx->dev = get_device(salg->dev[0]); - - if (salg->mode->sw_fb) { - tctx->fb.hash = crypto_alloc_ahash(salg->calg->cra_name, 0, - CRYPTO_ALG_NEED_FALLBACK); - - if (IS_ERR(tctx->fb.hash)) { - if (tctx->handle >= 0) - spacc_close(&priv->spacc, tctx->handle); - put_device(tctx->dev); - return PTR_ERR(tctx->fb.hash); - } - - crypto_ahash_set_statesize(__crypto_ahash_cast(tfm), - crypto_ahash_statesize(tctx->fb.hash)); - - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct spacc_crypto_reqctx) + - crypto_ahash_reqsize(tctx->fb.hash)); - - } else { - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct spacc_crypto_reqctx)); - } - - return 0; -} - -static void spacc_hash_cra_exit(struct crypto_tfm *tfm) -{ - struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm); - struct spacc_priv *priv = dev_get_drvdata(tctx->dev); - - crypto_free_ahash(tctx->fb.hash); - - if (tctx->handle >= 0) - spacc_close(&priv->spacc, tctx->handle); - - put_device(tctx->dev); -} - -static int spacc_hash_init(struct ahash_request *req) -{ - int rc = 0; - struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - - ctx->digest_buf = NULL; - ctx->single_shot = 0; - ctx->total_nents = 0; - tctx->tmp_sgl = NULL; 
- - ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); - ctx->fb.hash_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rc = crypto_ahash_init(&ctx->fb.hash_req); - - return rc; -} - -static int spacc_hash_update(struct ahash_request *req) -{ - int rc; - int nbytes = req->nbytes; - - struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - - if (!nbytes) - return 0; - - pr_debug("%s Using SW fallback\n", __func__); - - - ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); - ctx->fb.hash_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - ctx->fb.hash_req.nbytes = req->nbytes; - ctx->fb.hash_req.src = req->src; - - rc = crypto_ahash_update(&ctx->fb.hash_req); - return rc; -} - -static int spacc_hash_final(struct ahash_request *req) -{ - struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - int rc; - - - ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); - ctx->fb.hash_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - ctx->fb.hash_req.result = req->result; - - rc = crypto_ahash_final(&ctx->fb.hash_req); - return rc; -} - -static int spacc_hash_digest(struct ahash_request *req) -{ - int ret, final = 0; - int rc; - struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - struct spacc_priv *priv = dev_get_drvdata(tctx->dev); - const struct spacc_alg *salg = spacc_tfm_ahash(&reqtfm->base); - - - /* direct single shot digest call */ - ctx->single_shot = 1; - ctx->total_nents = sg_nents(req->src); - - /* alloc tmp_sgl */ - tctx->tmp_sgl = kmalloc(sizeof(*(tctx->tmp_sgl)) * 2, GFP_KERNEL); - - if (!tctx->tmp_sgl) - return -ENOMEM; - - 
sg_init_table(tctx->tmp_sgl, 2); - tctx->tmp_sgl[0].length = 0; - - - if (tctx->handle < 0 || !tctx->ctx_valid) { - priv = NULL; - pr_debug("%s: open SPAcc context\n", __func__); - - priv = dev_get_drvdata(salg->dev[0]); - tctx->dev = get_device(salg->dev[0]); - ret = spacc_isenabled(&priv->spacc, salg->mode->id, 0); - if (ret) - tctx->handle = spacc_open(&priv->spacc, - CRYPTO_MODE_NULL, - salg->mode->id, -1, 0, - spacc_digest_cb, - reqtfm); - - if (tctx->handle < 0) { - put_device(salg->dev[0]); - pr_debug("Failed to open SPAcc context\n"); - goto fallback; - } - - rc = spacc_set_operation(&priv->spacc, tctx->handle, - OP_ENCRYPT, ICV_HASH, IP_ICV_OFFSET, - 0, 0, 0); - if (rc < 0) { - spacc_close(&priv->spacc, tctx->handle); - pr_debug("Failed to open SPAcc context\n"); - tctx->handle = -1; - put_device(tctx->dev); - goto fallback; - } - tctx->ctx_valid = true; - } - - rc = spacc_hash_init_dma(tctx->dev, req, final); - if (rc < 0) - goto fallback; - - if (rc == 0) - return 0; - - rc = spacc_packet_enqueue_ddt(&priv->spacc, ctx->acb.new_handle, - &ctx->src, &ctx->dst, req->nbytes, - 0, req->nbytes, 0, 0, 0); - - if (rc < 0) { - spacc_hash_cleanup_dma(tctx->dev, req); - spacc_close(&priv->spacc, ctx->acb.new_handle); - - if (rc != -EBUSY) { - pr_debug("Failed to enqueue job, ERR: %d\n", rc); - return rc; - } - - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; - - goto fallback; - } - - return -EINPROGRESS; - -fallback: - /* Start from scratch as init is not called before digest */ - ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); - ctx->fb.hash_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - ctx->fb.hash_req.nbytes = req->nbytes; - ctx->fb.hash_req.src = req->src; - ctx->fb.hash_req.result = req->result; - - return crypto_ahash_digest(&ctx->fb.hash_req); -} - -static int spacc_hash_finup(struct ahash_request *req) -{ - struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); - struct spacc_crypto_ctx *tctx = 
crypto_ahash_ctx(reqtfm); - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - int rc; - - ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); - ctx->fb.hash_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - ctx->fb.hash_req.nbytes = req->nbytes; - ctx->fb.hash_req.src = req->src; - ctx->fb.hash_req.result = req->result; - - rc = crypto_ahash_finup(&ctx->fb.hash_req); - return rc; -} - -static int spacc_hash_import(struct ahash_request *req, const void *in) -{ - int rc; - struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - - ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); - ctx->fb.hash_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rc = crypto_ahash_import(&ctx->fb.hash_req, in); - return rc; -} - -static int spacc_hash_export(struct ahash_request *req, void *out) -{ - int rc; - struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm); - struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req); - - ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash); - ctx->fb.hash_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rc = crypto_ahash_export(&ctx->fb.hash_req, out); - return rc; -} - -static const struct ahash_alg spacc_hash_template = { - .init = spacc_hash_init, - .update = spacc_hash_update, - .final = spacc_hash_final, - .finup = spacc_hash_finup, - .digest = spacc_hash_digest, - .setkey = spacc_hash_setkey, - .export = spacc_hash_export, - .import = spacc_hash_import, - - .halg.base = { - .cra_priority = 300, - .cra_module = THIS_MODULE, - .cra_init = spacc_hash_cra_init, - .cra_exit = spacc_hash_cra_exit, - .cra_ctxsize = sizeof(struct spacc_crypto_ctx), - .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK | - CRYPTO_ALG_OPTIONAL_KEY - }, -}; - -static int 
spacc_register_hash(struct spacc_alg *salg) -{ - int rc; - - salg->calg = &salg->alg.hash.halg.base; - salg->alg.hash = spacc_hash_template; - - spacc_init_calg(salg->calg, salg->mode); - salg->alg.hash.halg.digestsize = salg->mode->hashlen; - salg->alg.hash.halg.statesize = spacc_set_statesize(salg); - - rc = crypto_register_ahash(&salg->alg.hash); - if (rc < 0) - return rc; - - mutex_lock(&spacc_hash_alg_mutex); - list_add(&salg->list, &spacc_hash_alg_list); - mutex_unlock(&spacc_hash_alg_mutex); - - return 0; -} - - -int probe_hashes(struct platform_device *spacc_pdev) -{ - int rc; - unsigned int i; - int registered = 0; - struct spacc_alg *salg; - struct spacc_priv *priv = dev_get_drvdata(&spacc_pdev->dev); - - spacc_hash_pool = dma_pool_create("spacc-digest", &spacc_pdev->dev, - SPACC_MAX_DIGEST_SIZE, - SPACC_DMA_ALIGN, SPACC_DMA_BOUNDARY); - - if (!spacc_hash_pool) - return -ENOMEM; - - for (i = 0; i < ARRAY_SIZE(possible_hashes); i++) - possible_hashes[i].valid = 0; - - for (i = 0; i < ARRAY_SIZE(possible_hashes); i++) { - if (possible_hashes[i].valid == 0 && - spacc_isenabled(&priv->spacc, - possible_hashes[i].id & 0xFF, - possible_hashes[i].hashlen)) { - - salg = kmalloc(sizeof(*salg), GFP_KERNEL); - if (!salg) - return -ENOMEM; - - salg->mode = &possible_hashes[i]; - - /* Copy all dev's over to the salg */ - salg->dev[0] = &spacc_pdev->dev; - salg->dev[1] = NULL; - - rc = spacc_register_hash(salg); - if (rc < 0) { - kfree(salg); - continue; - } - pr_debug("registered %s\n", - possible_hashes[i].name); - - registered++; - possible_hashes[i].valid = 1; - } - } - - return registered; -} - -int spacc_unregister_hash_algs(void) -{ - struct spacc_alg *salg, *tmp; - - mutex_lock(&spacc_hash_alg_mutex); - list_for_each_entry_safe(salg, tmp, &spacc_hash_alg_list, list) { - crypto_unregister_alg(salg->calg); - list_del(&salg->list); - kfree(salg); - } - mutex_unlock(&spacc_hash_alg_mutex); - - dma_pool_destroy(spacc_hash_pool); - - return 0; -} diff --git 
a/drivers/crypto/dwc-spacc/spacc_core.c b/drivers/crypto/dwc-spacc/spacc_core.c deleted file mode 100644 index 1da7cdd93e78..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_core.c +++ /dev/null @@ -1,2513 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include -#include -#include "spacc_hal.h" -#include "spacc_core.h" - -static const u8 spacc_ctrl_map[SPACC_CTRL_VER_SIZE][SPACC_CTRL_MAPSIZE] = { - { 0, 8, 4, 12, 24, 16, 31, 25, 26, 27, 28, 29, 14, 15 }, - { 0, 8, 3, 12, 24, 16, 31, 25, 26, 27, 28, 29, 14, 15 }, - { 0, 4, 8, 13, 15, 16, 24, 25, 26, 27, 28, 29, 30, 31 } -}; - -static const int keysizes[2][7] = { - /* 1 2 4 8 16 32 64 */ - { 5, 8, 16, 24, 32, 0, 0 }, /* cipher key sizes*/ - { 8, 16, 20, 24, 32, 64, 128 }, /* hash key sizes*/ -}; - - -/* bits are 40, 64, 128, 192, 256, and top bit for hash */ -static const unsigned char template[] = { - [CRYPTO_MODE_NULL] = 0, - [CRYPTO_MODE_AES_ECB] = 28, /* AESECB 128/224/256 */ - [CRYPTO_MODE_AES_CBC] = 28, /* AESCBC 128/224/256 */ - [CRYPTO_MODE_AES_CTR] = 28, /* AESCTR 128/224/256 */ - [CRYPTO_MODE_AES_CCM] = 28, /* AESCCM 128/224/256 */ - [CRYPTO_MODE_AES_GCM] = 28, /* AESGCM 128/224/256 */ - [CRYPTO_MODE_AES_F8] = 28, /* AESF8 128/224/256 */ - [CRYPTO_MODE_AES_XTS] = 20, /* AESXTS 128/256 */ - [CRYPTO_MODE_AES_CFB] = 28, /* AESCFB 128/224/256 */ - [CRYPTO_MODE_AES_OFB] = 28, /* AESOFB 128/224/256 */ - [CRYPTO_MODE_AES_CS1] = 28, /* AESCS1 128/224/256 */ - [CRYPTO_MODE_AES_CS2] = 28, /* AESCS2 128/224/256 */ - [CRYPTO_MODE_AES_CS3] = 28, /* AESCS3 128/224/256 */ - [CRYPTO_MODE_MULTI2_ECB] = 0, /* MULTI2 */ - [CRYPTO_MODE_MULTI2_CBC] = 0, /* MULTI2 */ - [CRYPTO_MODE_MULTI2_OFB] = 0, /* MULTI2 */ - [CRYPTO_MODE_MULTI2_CFB] = 0, /* MULTI2 */ - [CRYPTO_MODE_3DES_CBC] = 8, /* 3DES CBC */ - [CRYPTO_MODE_3DES_ECB] = 8, /* 3DES ECB */ - [CRYPTO_MODE_DES_CBC] = 2, /* DES CBC */ - [CRYPTO_MODE_DES_ECB] = 2, /* DES ECB */ - [CRYPTO_MODE_KASUMI_ECB] = 4, /* KASUMI ECB */ - [CRYPTO_MODE_KASUMI_F8] = 4, 
/* KASUMI F8 */ - [CRYPTO_MODE_SNOW3G_UEA2] = 4, /* SNOW3G */ - [CRYPTO_MODE_ZUC_UEA3] = 4, /* ZUC */ - [CRYPTO_MODE_CHACHA20_STREAM] = 16, /* CHACHA20 */ - [CRYPTO_MODE_CHACHA20_POLY1305] = 16, /* CHACHA20 */ - [CRYPTO_MODE_SM4_ECB] = 4, /* SM4ECB 128 */ - [CRYPTO_MODE_SM4_CBC] = 4, /* SM4CBC 128 */ - [CRYPTO_MODE_SM4_CFB] = 4, /* SM4CFB 128 */ - [CRYPTO_MODE_SM4_OFB] = 4, /* SM4OFB 128 */ - [CRYPTO_MODE_SM4_CTR] = 4, /* SM4CTR 128 */ - [CRYPTO_MODE_SM4_CCM] = 4, /* SM4CCM 128 */ - [CRYPTO_MODE_SM4_GCM] = 4, /* SM4GCM 128 */ - [CRYPTO_MODE_SM4_F8] = 4, /* SM4F8 128 */ - [CRYPTO_MODE_SM4_XTS] = 4, /* SM4XTS 128 */ - [CRYPTO_MODE_SM4_CS1] = 4, /* SM4CS1 128 */ - [CRYPTO_MODE_SM4_CS2] = 4, /* SM4CS2 128 */ - [CRYPTO_MODE_SM4_CS3] = 4, /* SM4CS3 128 */ - - [CRYPTO_MODE_HASH_MD5] = 242, - [CRYPTO_MODE_HMAC_MD5] = 242, - [CRYPTO_MODE_HASH_SHA1] = 242, - [CRYPTO_MODE_HMAC_SHA1] = 242, - [CRYPTO_MODE_HASH_SHA224] = 242, - [CRYPTO_MODE_HMAC_SHA224] = 242, - [CRYPTO_MODE_HASH_SHA256] = 242, - [CRYPTO_MODE_HMAC_SHA256] = 242, - [CRYPTO_MODE_HASH_SHA384] = 242, - [CRYPTO_MODE_HMAC_SHA384] = 242, - [CRYPTO_MODE_HASH_SHA512] = 242, - [CRYPTO_MODE_HMAC_SHA512] = 242, - [CRYPTO_MODE_HASH_SHA512_224] = 242, - [CRYPTO_MODE_HMAC_SHA512_224] = 242, - [CRYPTO_MODE_HASH_SHA512_256] = 242, - [CRYPTO_MODE_HMAC_SHA512_256] = 242, - [CRYPTO_MODE_MAC_XCBC] = 154, /* XaCBC */ - [CRYPTO_MODE_MAC_CMAC] = 154, /* CMAC */ - [CRYPTO_MODE_MAC_KASUMI_F9] = 130, /* KASUMI */ - [CRYPTO_MODE_MAC_SNOW3G_UIA2] = 130, /* SNOW */ - [CRYPTO_MODE_MAC_ZUC_UIA3] = 130, /* ZUC */ - [CRYPTO_MODE_MAC_POLY1305] = 144, - [CRYPTO_MODE_SSLMAC_MD5] = 130, - [CRYPTO_MODE_SSLMAC_SHA1] = 132, - [CRYPTO_MODE_HASH_CRC32] = 0, - [CRYPTO_MODE_MAC_MICHAEL] = 129, - - [CRYPTO_MODE_HASH_SHA3_224] = 242, - [CRYPTO_MODE_HASH_SHA3_256] = 242, - [CRYPTO_MODE_HASH_SHA3_384] = 242, - [CRYPTO_MODE_HASH_SHA3_512] = 242, - [CRYPTO_MODE_HASH_SHAKE128] = 242, - [CRYPTO_MODE_HASH_SHAKE256] = 242, - [CRYPTO_MODE_HASH_CSHAKE128] = 130, - 
[CRYPTO_MODE_HASH_CSHAKE256] = 130, - [CRYPTO_MODE_MAC_KMAC128] = 242, - [CRYPTO_MODE_MAC_KMAC256] = 242, - [CRYPTO_MODE_MAC_KMACXOF128] = 242, - [CRYPTO_MODE_MAC_KMACXOF256] = 242, - [CRYPTO_MODE_HASH_SM3] = 242, - [CRYPTO_MODE_HMAC_SM3] = 242, - [CRYPTO_MODE_MAC_SM4_XCBC] = 242, - [CRYPTO_MODE_MAC_SM4_CMAC] = 242, -}; - -#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AUTODETECT) -static const struct { - unsigned int min_version; - struct { - int outlen; - unsigned char data[64]; - } test[7]; -} testdata[CRYPTO_MODE_LAST] = { - /* NULL*/ - { .min_version = 0x65, - .test[0].outlen = 0 - }, - - /* AES_ECB*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0xc6, 0xa1, 0x3b, 0x37, - 0x87, 0x8f, 0x5b, 0x82, 0x6f, 0x4f, 0x81, 0x62, 0xa1, - 0xc8, 0xd8, 0x79, }, - .test[3].outlen = 16, .test[3].data = { 0x91, 0x62, 0x51, 0x82, - 0x1c, 0x73, 0xa5, 0x22, 0xc3, 0x96, 0xd6, 0x27, 0x38, - 0x01, 0x96, 0x07, }, - .test[4].outlen = 16, .test[4].data = { 0xf2, 0x90, 0x00, 0xb6, - 0x2a, 0x49, 0x9f, 0xd0, 0xa9, 0xf3, 0x9a, 0x6a, 0xdd, - 0x2e, 0x77, 0x80, }, - }, - - /* AES_CBC*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x0a, 0x94, 0x0b, 0xb5, - 0x41, 0x6e, 0xf0, 0x45, 0xf1, 0xc3, 0x94, 0x58, 0xc6, - 0x53, 0xea, 0x5a, }, - .test[3].outlen = 16, .test[3].data = { 0x00, 0x60, 0xbf, 0xfe, - 0x46, 0x83, 0x4b, 0xb8, 0xda, 0x5c, 0xf9, 0xa6, 0x1f, - 0xf2, 0x20, 0xae, }, - .test[4].outlen = 16, .test[4].data = { 0x5a, 0x6e, 0x04, 0x57, - 0x08, 0xfb, 0x71, 0x96, 0xf0, 0x2e, 0x55, 0x3d, 0x02, - 0xc3, 0xa6, 0x92, }, - }, - - /* AES_CTR*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x0a, 0x94, 0x0b, 0xb5, - 0x41, 0x6e, 0xf0, 0x45, 0xf1, 0xc3, 0x94, 0x58, 0xc6, - 0x53, 0xea, 0x5a, }, - .test[3].outlen = 16, .test[3].data = { 0x00, 0x60, 0xbf, 0xfe, - 0x46, 0x83, 0x4b, 0xb8, 0xda, 0x5c, 0xf9, 0xa6, 0x1f, - 0xf2, 0x20, 0xae, }, - .test[4].outlen = 16, .test[4].data = { 0x5a, 0x6e, 0x04, 0x57, - 0x08, 0xfb, 0x71, 0x96, 0xf0, 0x2e, 0x55, 
0x3d, 0x02, - 0xc3, 0xa6, 0x92, }, - }, - - /* AES_CCM*/ - { .min_version = 0x65, - .test[2].outlen = 32, .test[2].data = { 0x02, 0x63, 0xec, 0x94, - 0x66, 0x18, 0x72, 0x96, 0x9a, 0xda, 0xfd, 0x0f, 0x4b, - 0xa4, 0x0f, 0xdc, 0xa5, 0x09, 0x92, 0x93, 0xb6, 0xb4, - 0x38, 0x34, 0x63, 0x72, 0x50, 0x4c, 0xfc, 0x8a, 0x63, - 0x02, }, - .test[3].outlen = 32, .test[3].data = { 0x29, 0xf7, 0x63, 0xe8, - 0xa1, 0x75, 0xc6, 0xbf, 0xa5, 0x54, 0x94, 0x89, 0x12, - 0x84, 0x45, 0xf5, 0x9b, 0x27, 0xeb, 0xb1, 0xa4, 0x65, - 0x93, 0x6e, 0x5a, 0xc0, 0xa2, 0xa3, 0xe2, 0x6c, 0x46, - 0x29, }, - .test[4].outlen = 32, .test[4].data = { 0x60, 0xf3, 0x10, 0xd5, - 0xc3, 0x85, 0x58, 0x5d, 0x55, 0x16, 0xfb, 0x51, 0x72, - 0xe5, 0x20, 0xcf, 0x8e, 0x87, 0x6d, 0x72, 0xc8, 0x44, - 0xbe, 0x6d, 0xa2, 0xd6, 0xf4, 0xba, 0xec, 0xb4, 0xec, - 0x39, }, - }, - - /* AES_GCM*/ - { .min_version = 0x65, - .test[2].outlen = 32, .test[2].data = { 0x93, 0x6c, 0xa7, 0xce, - 0x66, 0x1b, 0xf7, 0x54, 0x4b, 0xd2, 0x61, 0x8a, 0x36, - 0xa3, 0x70, 0x08, 0xc0, 0xd7, 0xd0, 0x77, 0xc5, 0x64, - 0x76, 0xdb, 0x48, 0x4a, 0x53, 0xe3, 0x6c, 0x93, 0x34, - 0x0f, }, - .test[3].outlen = 32, .test[3].data = { 0xe6, 0xf9, 0x22, 0x9b, - 0x99, 0xb9, 0xc9, 0x0e, 0xd0, 0x33, 0xdc, 0x82, 0xff, - 0xa9, 0xdc, 0x70, 0x4c, 0xcd, 0xc4, 0x1b, 0xa3, 0x5a, - 0x87, 0x5d, 0xd8, 0xef, 0xb6, 0x48, 0xbb, 0x0c, 0x92, - 0x60, }, - .test[4].outlen = 32, .test[4].data = { 0x47, 0x02, 0xd6, 0x1b, - 0xc5, 0xe5, 0xc2, 0x1b, 0x8d, 0x41, 0x97, 0x8b, 0xb1, - 0xe9, 0x78, 0x6d, 0x48, 0x6f, 0x78, 0x81, 0xc7, 0x98, - 0xcc, 0xf5, 0x28, 0xf1, 0x01, 0x7c, 0xe8, 0xf6, 0x09, - 0x78, }, - }, - - /* AES-F8*/ - { .min_version = 0x65, - .test[0].outlen = 0 - }, - - /* AES-XTS*/ - { .min_version = 0x65, - .test[2].outlen = 32, .test[2].data = { 0xa0, 0x1a, 0x6f, 0x09, - 0xfa, 0xef, 0xd2, 0x72, 0xc3, 0x9b, 0xad, 0x35, 0x52, - 0xfc, 0xa1, 0xcb, 0x33, 0x69, 0x51, 0xc5, 0x23, 0xbe, - 0xac, 0xa5, 0x4a, 0xf2, 0xfc, 0x77, 0x71, 0x6f, 0x9a, - 0x86, }, - .test[4].outlen = 32, .test[4].data = 
{ 0x05, 0x45, 0x91, 0x86, - 0xf2, 0x2d, 0x97, 0x93, 0xf3, 0xa0, 0xbb, 0x29, 0xc7, - 0x9c, 0xc1, 0x4c, 0x3b, 0x8f, 0xdd, 0x9d, 0xda, 0xc7, - 0xb5, 0xaa, 0xc2, 0x7c, 0x2e, 0x71, 0xce, 0x7f, 0xce, - 0x0e, }, - }, - - /* AES-CFB*/ - { .min_version = 0x65, - .test[0].outlen = 0 - }, - - /* AES-OFB*/ - { .min_version = 0x65, - .test[0].outlen = 0 - }, - - /* AES-CS1*/ - { .min_version = 0x65, - .test[2].outlen = 31, .test[2].data = { 0x0a, 0x94, 0x0b, 0xb5, - 0x41, 0x6e, 0xf0, 0x45, 0xf1, 0xc3, 0x94, 0x58, 0xc6, - 0x53, 0xea, 0xae, 0xe7, 0x1e, 0xa5, 0x41, 0xd7, 0xae, - 0x4b, 0xeb, 0x60, 0xbe, 0xcc, 0x59, 0x3f, 0xb6, 0x63, - }, - .test[3].outlen = 31, .test[3].data = { 0x00, 0x60, 0xbf, 0xfe, - 0x46, 0x83, 0x4b, 0xb8, 0xda, 0x5c, 0xf9, 0xa6, 0x1f, - 0xf2, 0x20, 0x2e, 0x84, 0xcb, 0x12, 0xa3, 0x59, 0x17, - 0xb0, 0x9e, 0x25, 0xa2, 0xa2, 0x3d, 0xf1, 0x9f, 0xdc, - }, - .test[4].outlen = 31, .test[4].data = { 0x5a, 0x6e, 0x04, 0x57, - 0x08, 0xfb, 0x71, 0x96, 0xf0, 0x2e, 0x55, 0x3d, 0x02, - 0xc3, 0xa6, 0xcd, 0xfc, 0x25, 0x35, 0x31, 0x0b, 0xf5, - 0x6b, 0x2e, 0xb7, 0x8a, 0xa2, 0x5a, 0xdd, 0x77, 0x51, - }, - }, - - /* AES-CS2*/ - { .min_version = 0x65, - .test[2].outlen = 31, .test[2].data = { 0xae, 0xe7, 0x1e, 0xa5, - 0x41, 0xd7, 0xae, 0x4b, 0xeb, 0x60, 0xbe, 0xcc, 0x59, - 0x3f, 0xb6, 0x63, 0x0a, 0x94, 0x0b, 0xb5, 0x41, 0x6e, - 0xf0, 0x45, 0xf1, 0xc3, 0x94, 0x58, 0xc6, 0x53, 0xea, - }, - .test[3].outlen = 31, .test[3].data = { 0x2e, 0x84, 0xcb, 0x12, - 0xa3, 0x59, 0x17, 0xb0, 0x9e, 0x25, 0xa2, 0xa2, 0x3d, - 0xf1, 0x9f, 0xdc, 0x00, 0x60, 0xbf, 0xfe, 0x46, 0x83, - 0x4b, 0xb8, 0xda, 0x5c, 0xf9, 0xa6, 0x1f, 0xf2, 0x20, - }, - .test[4].outlen = 31, .test[4].data = { 0xcd, 0xfc, 0x25, 0x35, - 0x31, 0x0b, 0xf5, 0x6b, 0x2e, 0xb7, 0x8a, 0xa2, 0x5a, - 0xdd, 0x77, 0x51, 0x5a, 0x6e, 0x04, 0x57, 0x08, 0xfb, - 0x71, 0x96, 0xf0, 0x2e, 0x55, 0x3d, 0x02, 0xc3, 0xa6, - }, - }, - - /* AES-CS3*/ - { .min_version = 0x65, - .test[2].outlen = 31, .test[2].data = { 0xae, 0xe7, 0x1e, 0xa5, - 0x41, 0xd7, 
0xae, 0x4b, 0xeb, 0x60, 0xbe, 0xcc, 0x59, - 0x3f, 0xb6, 0x63, 0x0a, 0x94, 0x0b, 0xb5, 0x41, 0x6e, - 0xf0, 0x45, 0xf1, 0xc3, 0x94, 0x58, 0xc6, 0x53, 0xea, - }, - .test[3].outlen = 31, .test[3].data = { 0x2e, 0x84, 0xcb, 0x12, - 0xa3, 0x59, 0x17, 0xb0, 0x9e, 0x25, 0xa2, 0xa2, 0x3d, - 0xf1, 0x9f, 0xdc, 0x00, 0x60, 0xbf, 0xfe, 0x46, 0x83, - 0x4b, 0xb8, 0xda, 0x5c, 0xf9, 0xa6, 0x1f, 0xf2, 0x20, - }, - .test[4].outlen = 31, .test[4].data = { 0xcd, 0xfc, 0x25, 0x35, - 0x31, 0x0b, 0xf5, 0x6b, 0x2e, 0xb7, 0x8a, 0xa2, 0x5a, - 0xdd, 0x77, 0x51, 0x5a, 0x6e, 0x04, 0x57, 0x08, 0xfb, - 0x71, 0x96, 0xf0, 0x2e, 0x55, 0x3d, 0x02, 0xc3, 0xa6, - }, - }, - - /* MULTI2*/ - { .min_version = 0x65, - .test[0].outlen = 0 - }, - { .min_version = 0x65, - .test[0].outlen = 0 - }, - { .min_version = 0x65, - .test[0].outlen = 0 - }, - { .min_version = 0x65, - .test[0].outlen = 0 - }, - - /* 3DES_CBC*/ - { .min_version = 0x65, - .test[3].outlen = 16, .test[3].data = { 0x58, 0xed, 0x24, 0x8f, - 0x77, 0xf6, 0xb1, 0x9e, 0x47, 0xd9, 0xb7, 0x4a, 0x4f, - 0x5a, 0xe6, 0x6d, } - }, - - /* 3DES_ECB*/ - { .min_version = 0x65, - .test[3].outlen = 16, .test[3].data = { 0x89, 0x4b, 0xc3, 0x08, - 0x54, 0x26, 0xa4, 0x41, 0x89, 0x4b, 0xc3, 0x08, 0x54, - 0x26, 0xa4, 0x41, } - }, - - /* DES_CBC*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0xe1, 0xb2, 0x46, 0xe5, - 0xa7, 0xc7, 0x4c, 0xbc, 0xd5, 0xf0, 0x8e, 0x25, 0x3b, - 0xfa, 0x23, 0x80, } - }, - - /* DES_ECB*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0xa5, 0x17, 0x3a, - 0xd5, 0x95, 0x7b, 0x43, 0x70, 0xa5, 0x17, 0x3a, 0xd5, - 0x95, 0x7b, 0x43, 0x70, } - }, - - /* KASUMI_ECB*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x04, 0x7d, 0x5d, - 0x2c, 0x8c, 0x2e, 0x91, 0xb3, 0x04, 0x7d, 0x5d, 0x2c, - 0x8c, 0x2e, 0x91, 0xb3, } }, - - /* KASUMI_F8*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0xfc, 0xf7, 0x45, - 0xee, 0x1d, 0xbb, 0xa4, 0x57, 0xa7, 0x45, 0xdc, 0x6b, - 0x2a, 0x1b, 
0x50, 0x88, } - }, - - /* SNOW3G UEA2*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x95, 0xd3, 0xc8, - 0x13, 0xc0, 0x20, 0x24, 0xa3, 0x76, 0x24, 0xd1, 0x98, - 0xb6, 0x67, 0x4d, 0x4c, } - }, - - /* ZUC UEA3*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0xda, 0xdf, 0xb6, - 0xa2, 0xac, 0x9d, 0xba, 0xfe, 0x18, 0x9c, 0x0c, 0x75, - 0x79, 0xc6, 0xe0, 0x4e, } - }, - - /* CHACHA20_STREAM*/ - { .min_version = 0x65, - .test[4].outlen = 16, .test[4].data = { 0x55, 0xdf, 0x91, - 0xe9, 0x27, 0x01, 0x37, 0x69, 0xdb, 0x38, 0xd4, 0x28, - 0x01, 0x79, 0x76, 0x64 } - }, - - /* CHACHA20_POLY1305 (AEAD)*/ - { .min_version = 0x65, - .test[4].outlen = 16, .test[4].data = { 0x89, 0xfb, 0x08, - 0x00, 0x29, 0x17, 0xa5, 0x40, 0xb7, 0x83, 0x3f, 0xf3, - 0x98, 0x1d, 0x0e, 0x63 } - }, - - /* SM4_ECB 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x1e, 0x96, 0x34, - 0xb7, 0x70, 0xf9, 0xae, 0xba, 0xa9, 0x34, 0x4f, 0x5a, - 0xff, 0x9f, 0x82, 0xa3 } - }, - - /* SM4_CBC 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x8f, 0x78, 0x76, - 0x3e, 0xe0, 0x60, 0x13, 0xe0, 0xb7, 0x62, 0x2c, 0x42, - 0x8f, 0xd0, 0x52, 0x8d } - }, - - /* SM4_CFB 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x8f, 0x78, 0x76, - 0x3e, 0xe0, 0x60, 0x13, 0xe0, 0xb7, 0x62, 0x2c, 0x42, - 0x8f, 0xd0, 0x52, 0x8d } - }, - - /* SM4_OFB 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x8f, 0x78, 0x76, 0x3e, 0xe0, - 0x60, 0x13, 0xe0, 0xb7, 0x62, 0x2c, 0x42, 0x8f, 0xd0, 0x52, - 0x8d } - }, - - /* SM4_CTR 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x8f, 0x78, 0x76, 0x3e, 0xe0, - 0x60, 0x13, 0xe0, 0xb7, 0x62, 0x2c, 0x42, 0x8f, 0xd0, 0x52, - 0x8d } - }, - - /* SM4_CCM 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x8e, 0x25, 0x5a, - 0x13, 0xc7, 0x43, 0x4d, 0x95, 0xef, 0x14, 0x15, 0x11, - 0xd0, 0xb9, 0x60, 0x5b } - }, - - /* SM4_GCM 128*/ - { 
.min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x97, 0x46, 0xde, - 0xfb, 0xc9, 0x6a, 0x85, 0x00, 0xff, 0x9c, 0x74, 0x4d, - 0xd1, 0xbb, 0xf9, 0x66 } - }, - - /* SM4_F8 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x77, 0x30, 0xff, - 0x70, 0x46, 0xbc, 0xf4, 0xe3, 0x11, 0xf6, 0x27, 0xe2, - 0xff, 0xd7, 0xc4, 0x2e } - }, - - /* SM4_XTS 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x05, 0x3f, 0xb6, - 0xe9, 0xb1, 0xff, 0x09, 0x4f, 0x9d, 0x69, 0x4d, 0xc2, - 0xb6, 0xa1, 0x15, 0xde } - }, - - /* SM4_CS1 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0x8f, 0x78, 0x76, - 0x3e, 0xe0, 0x60, 0x13, 0xe0, 0xb7, 0x62, 0x2c, 0x42, - 0x8f, 0xd0, 0x52, 0xa0 } - }, - - /* SM4_CS2 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0xa0, 0x1c, 0xfe, - 0x91, 0xaa, 0x7e, 0xf1, 0x75, 0x6a, 0xe8, 0xbc, 0xe1, - 0x55, 0x08, 0xda, 0x71 } - }, - - /* SM4_CS3 128*/ - { .min_version = 0x65, - .test[2].outlen = 16, .test[2].data = { 0xa0, 0x1c, 0xfe, - 0x91, 0xaa, 0x7e, 0xf1, 0x75, 0x6a, 0xe8, 0xbc, 0xe1, - 0x55, 0x08, 0xda, 0x71 } - }, - - /* hashes ... note they use the 2nd keysize - * array so the indecies mean different sizes!!! 
- */ - - /* MD5 HASH/HMAC*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0x70, 0xbc, 0x8f, 0x4b, - 0x72, 0xa8, 0x69, 0x21, 0x46, 0x8b, 0xf8, 0xe8, 0x44, - 0x1d, 0xce, 0x51, } - }, - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0xb6, 0x39, 0xc8, 0x73, - 0x16, 0x38, 0x61, 0x8b, 0x70, 0x79, 0x72, 0xaa, 0x6e, - 0x96, 0xcf, 0x90, }, - .test[4].outlen = 16, .test[4].data = { 0xb7, 0x79, 0x68, 0xea, - 0x17, 0x32, 0x1e, 0x32, 0x13, 0x90, 0x6c, 0x2e, 0x9f, - 0xd5, 0xc8, 0xb3, }, - .test[5].outlen = 16, .test[5].data = { 0x80, 0x3e, 0x0a, 0x2f, - 0x8a, 0xd8, 0x31, 0x8f, 0x8e, 0x12, 0x28, 0x86, 0x22, - 0x59, 0x6b, 0x05, }, - }, - /* SHA1*/ - { .min_version = 0x65, - .test[1].outlen = 20, .test[1].data = { 0xde, 0x8a, 0x84, 0x7b, - 0xff, 0x8c, 0x34, 0x3d, 0x69, 0xb8, 0x53, 0xa2, 0x15, - 0xe6, 0xee, 0x77, 0x5e, 0xf2, 0xef, 0x96, } - }, - { .min_version = 0x65, - .test[1].outlen = 20, .test[1].data = { 0xf8, 0x54, 0x60, 0x50, - 0x49, 0x56, 0xd1, 0xcd, 0x55, 0x5c, 0x5d, 0xcd, 0x24, - 0x33, 0xbf, 0xdc, 0x5c, 0x99, 0x54, 0xc8, }, - .test[4].outlen = 20, .test[4].data = { 0x66, 0x3f, 0x3a, 0x3c, - 0x08, 0xb6, 0x87, 0xb2, 0xd3, 0x0c, 0x5a, 0xa7, 0xcc, - 0x5c, 0xc3, 0x99, 0xb2, 0xb4, 0x58, 0x55, }, - .test[5].outlen = 20, .test[5].data = { 0x9a, 0x28, 0x54, 0x2f, - 0xaf, 0xa7, 0x0b, 0x37, 0xbe, 0x2d, 0x3e, 0xd9, 0xd4, - 0x70, 0xbc, 0xdc, 0x0b, 0x54, 0x20, 0x06, }, - }, - /* SHA224_HASH*/ - { .min_version = 0x65, - .test[1].outlen = 28, .test[1].data = { 0xb3, 0x38, 0xc7, 0x6b, - 0xcf, 0xfa, 0x1a, 0x0b, 0x3e, 0xad, 0x8d, 0xe5, 0x8d, - 0xfb, 0xff, 0x47, 0xb6, 0x3a, 0xb1, 0x15, 0x0e, 0x10, - 0xd8, 0xf1, 0x7f, 0x2b, 0xaf, 0xdf, } - }, - { .min_version = 0x65, - .test[1].outlen = 28, .test[1].data = { 0xf3, 0xb4, 0x33, 0x78, - 0x53, 0x4c, 0x0c, 0x4a, 0x1e, 0x31, 0xc2, 0xce, 0xda, - 0xc8, 0xfe, 0x74, 0x4a, 0xd2, 0x9b, 0x7c, 0x1d, 0x2f, - 0x5e, 0xa1, 0xaa, 0x31, 0xb9, 0xf5, }, - .test[4].outlen = 28, .test[4].data = { 0x4b, 0x6b, 0x3f, 0x9a, - 0x66, 
0x47, 0x45, 0xe2, 0x60, 0xc9, 0x53, 0x86, 0x7a, - 0x34, 0x65, 0x7d, 0xe2, 0x24, 0x06, 0xcc, 0xf9, 0x17, - 0x20, 0x5d, 0xc2, 0xb6, 0x97, 0x9a, }, - .test[5].outlen = 28, .test[5].data = { 0x90, 0xb0, 0x6e, 0xee, - 0x21, 0x57, 0x38, 0xc7, 0x65, 0xbb, 0x9a, 0xf5, 0xb4, - 0x31, 0x0a, 0x0e, 0xe5, 0x64, 0xc4, 0x49, 0x9d, 0xbd, - 0xe9, 0xf7, 0xac, 0x9f, 0xf8, 0x05, }, - }, - - /* SHA256_HASH*/ - { .min_version = 0x65, - .test[1].outlen = 32, .test[1].data = { 0x66, 0x68, 0x7a, 0xad, - 0xf8, 0x62, 0xbd, 0x77, 0x6c, 0x8f, 0xc1, 0x8b, 0x8e, - 0x9f, 0x8e, 0x20, 0x08, 0x97, 0x14, 0x85, 0x6e, 0xe2, - 0x33, 0xb3, 0x90, 0x2a, 0x59, 0x1d, 0x0d, 0x5f, 0x29, - 0x25, } - }, - { .min_version = 0x65, - .test[1].outlen = 32, .test[1].data = { 0x75, 0x40, 0x84, 0x49, - 0x54, 0x0a, 0xf9, 0x80, 0x99, 0xeb, 0x93, 0x6b, 0xf6, - 0xd3, 0xff, 0x41, 0x05, 0x47, 0xcc, 0x82, 0x62, 0x76, - 0x32, 0xf3, 0x43, 0x74, 0x70, 0x54, 0xe2, 0x3b, 0xc0, - 0x90, }, - .test[4].outlen = 32, .test[4].data = { 0x41, 0x6c, 0x53, 0x92, - 0xb9, 0xf3, 0x6d, 0xf1, 0x88, 0xe9, 0x0e, 0xb1, 0x4d, - 0x17, 0xbf, 0x0d, 0xa1, 0x90, 0xbf, 0xdb, 0x7f, 0x1f, - 0x49, 0x56, 0xe6, 0xe5, 0x66, 0xa5, 0x69, 0xc8, 0xb1, - 0x5c, }, - .test[5].outlen = 32, .test[5].data = { 0x49, 0x1f, 0x58, 0x3b, - 0x05, 0xe2, 0x3a, 0x72, 0x1d, 0x11, 0x6d, 0xc1, 0x08, - 0xa0, 0x3f, 0x30, 0x37, 0x98, 0x36, 0x8a, 0x49, 0x4c, - 0x21, 0x1d, 0x56, 0xa5, 0x2a, 0xf3, 0x68, 0x28, 0xb7, - 0x69, }, - }, - /* SHA384_HASH*/ - { .min_version = 0x65, - .test[1].outlen = 48, .test[1].data = { 0xa3, 0x8f, 0xff, 0x4b, - 0xa2, 0x6c, 0x15, 0xe4, 0xac, 0x9c, 0xde, 0x8c, 0x03, - 0x10, 0x3a, 0xc8, 0x90, 0x80, 0xfd, 0x47, 0x54, 0x5f, - 0xde, 0x94, 0x46, 0xc8, 0xf1, 0x92, 0x72, 0x9e, 0xab, - 0x7b, 0xd0, 0x3a, 0x4d, 0x5c, 0x31, 0x87, 0xf7, 0x5f, - 0xe2, 0xa7, 0x1b, 0x0e, 0xe5, 0x0a, 0x4a, 0x40, } - }, - { .min_version = 0x65, - .test[1].outlen = 48, .test[1].data = { 0x6c, 0xd8, 0x89, 0xa0, - 0xca, 0x54, 0xa6, 0x1d, 0x24, 0xc4, 0x1d, 0xa1, 0x77, - 0x50, 0xd6, 0xf2, 0xf3, 0x43, 
0x23, 0x0d, 0xb1, 0xf5, - 0xf7, 0xfc, 0xc0, 0x8c, 0xf6, 0xdf, 0x3c, 0x61, 0xfc, - 0x8a, 0xb9, 0xda, 0x12, 0x75, 0x97, 0xac, 0x51, 0x88, - 0x59, 0x19, 0x44, 0x13, 0xc0, 0x78, 0xa5, 0xa8, }, - .test[4].outlen = 48, .test[4].data = { 0x0c, 0x91, 0x36, 0x46, - 0xd9, 0x17, 0x81, 0x46, 0x1d, 0x42, 0xb1, 0x00, 0xaa, - 0xfa, 0x26, 0x92, 0x9f, 0x05, 0xc0, 0x91, 0x8e, 0x20, - 0xd7, 0x75, 0x9d, 0xd2, 0xc8, 0x9b, 0x02, 0x18, 0x20, - 0x1f, 0xdd, 0xa3, 0x32, 0xe3, 0x1e, 0xa4, 0x2b, 0xc3, - 0xc8, 0xb9, 0xb1, 0x53, 0x4e, 0x6a, 0x49, 0xd2, }, - .test[5].outlen = 48, .test[5].data = { 0x84, 0x78, 0xd2, 0xf1, - 0x44, 0x95, 0x6a, 0x22, 0x2d, 0x08, 0x19, 0xe8, 0xea, - 0x61, 0xb4, 0x86, 0xe8, 0xc6, 0xb0, 0x40, 0x51, 0x28, - 0x22, 0x54, 0x48, 0xc0, 0x70, 0x09, 0x81, 0xf9, 0xf5, - 0x47, 0x9e, 0xb3, 0x2c, 0x69, 0x19, 0xd5, 0x8d, 0x03, - 0x5d, 0x24, 0xca, 0x90, 0xa6, 0x9d, 0x80, 0x2a, }, - .test[6].outlen = 48, .test[6].data = { 0x0e, 0x68, 0x17, 0x31, - 0x01, 0xa8, 0x28, 0x0a, 0x4e, 0x47, 0x22, 0xa6, 0x89, - 0xf0, 0xc6, 0xcd, 0x4e, 0x8c, 0x19, 0x4c, 0x44, 0x3d, - 0xb5, 0xa5, 0xf9, 0xfe, 0xea, 0xc7, 0x84, 0x0b, 0x57, - 0x0d, 0xd4, 0xe4, 0x8a, 0x3f, 0x68, 0x31, 0x20, 0xd9, - 0x1f, 0xc4, 0xa3, 0x76, 0xcf, 0xdd, 0x07, 0xa6, }, - }, - /* SHA512_HASH */ - { .min_version = 0x65, - .test[1].outlen = 64, .test[1].data = { 0x50, 0x46, 0xad, 0xc1, - 0xdb, 0xa8, 0x38, 0x86, 0x7b, 0x2b, 0xbb, 0xfd, 0xd0, - 0xc3, 0x42, 0x3e, 0x58, 0xb5, 0x79, 0x70, 0xb5, 0x26, - 0x7a, 0x90, 0xf5, 0x79, 0x60, 0x92, 0x4a, 0x87, 0xf1, - 0x96, 0x0a, 0x6a, 0x85, 0xea, 0xa6, 0x42, 0xda, 0xc8, - 0x35, 0x42, 0x4b, 0x5d, 0x7c, 0x8d, 0x63, 0x7c, 0x00, - 0x40, 0x8c, 0x7a, 0x73, 0xda, 0x67, 0x2b, 0x7f, 0x49, - 0x85, 0x21, 0x42, 0x0b, 0x6d, 0xd3, } - }, - { .min_version = 0x65, - .test[1].outlen = 64, .test[1].data = { 0xec, 0xfd, 0x83, 0x74, - 0xc8, 0xa9, 0x2f, 0xd7, 0x71, 0x94, 0xd1, 0x1e, 0xe7, - 0x0f, 0x0f, 0x5e, 0x11, 0x29, 0x58, 0xb8, 0x36, 0xc6, - 0x39, 0xbc, 0xd6, 0x88, 0x6e, 0xdb, 0xc8, 0x06, 0x09, - 0x30, 0x27, 0xaa, 0x69, 
0xb9, 0x2a, 0xd4, 0x67, 0x06, - 0x5c, 0x82, 0x8e, 0x90, 0xe9, 0x3e, 0x55, 0x88, 0x7d, - 0xb2, 0x2b, 0x48, 0xa2, 0x28, 0x92, 0x6c, 0x0f, 0xf1, - 0x57, 0xb5, 0xd0, 0x06, 0x1d, 0xf3, }, - .test[4].outlen = 64, .test[4].data = { 0x47, 0x88, 0x91, 0xe9, - 0x12, 0x3e, 0xfd, 0xdc, 0x26, 0x29, 0x08, 0xd6, 0x30, - 0x8f, 0xcc, 0xb6, 0x93, 0x30, 0x58, 0x69, 0x4e, 0x81, - 0xee, 0x9d, 0xb6, 0x0f, 0xc5, 0x54, 0xe6, 0x7c, 0x84, - 0xc5, 0xbc, 0x89, 0x99, 0xf0, 0xf3, 0x7f, 0x6f, 0x3f, - 0xf5, 0x04, 0x2c, 0xdf, 0x76, 0x72, 0x6a, 0xbe, 0x28, - 0x3b, 0xb8, 0x05, 0xb3, 0x47, 0x45, 0xf5, 0x7f, 0xb1, - 0x21, 0x2d, 0xe0, 0x8d, 0x1e, 0x29, }, - .test[5].outlen = 64, .test[5].data = { 0x7e, 0x55, 0xda, 0x88, - 0x28, 0xc1, 0x6e, 0x9a, 0x6a, 0x99, 0xa0, 0x37, 0x68, - 0xf0, 0x28, 0x5e, 0xe2, 0xbe, 0x00, 0xac, 0x76, 0x89, - 0x76, 0xcc, 0x5d, 0x98, 0x1b, 0x32, 0x1a, 0x14, 0xc4, - 0x2e, 0x9c, 0xe4, 0xf3, 0x3f, 0x5f, 0xa0, 0xae, 0x95, - 0x16, 0x0b, 0x14, 0xf5, 0xf5, 0x45, 0x29, 0xd8, 0xc9, - 0x43, 0xf2, 0xa9, 0xbc, 0xdc, 0x03, 0x81, 0x0d, 0x36, - 0x2f, 0xb1, 0x22, 0xe8, 0x13, 0xf8, }, - .test[6].outlen = 64, .test[6].data = { 0x5d, 0xc4, 0x80, 0x90, - 0x6b, 0x00, 0x17, 0x04, 0x34, 0x63, 0x93, 0xf1, 0xad, - 0x9a, 0x3e, 0x13, 0x37, 0x6b, 0x86, 0xd7, 0xc4, 0x2b, - 0x22, 0x9c, 0x2e, 0xf2, 0x1d, 0xde, 0x35, 0x39, 0x03, - 0x3f, 0x2b, 0x3a, 0xc3, 0x49, 0xb3, 0x32, 0x86, 0x63, - 0x6b, 0x0f, 0x27, 0x95, 0x97, 0xe5, 0xe7, 0x2b, 0x9b, - 0x80, 0xea, 0x94, 0x4d, 0x84, 0x2e, 0x39, 0x44, 0x8f, - 0x56, 0xe3, 0xcd, 0xa7, 0x12, 0x3e, }, - }, - /* SHA512_224_HASH */ - { .min_version = 0x65, - .test[1].outlen = 28, .test[1].data = { 0x9e, 0x7d, 0x60, 0x80, - 0xde, 0xf4, 0xe1, 0xcc, 0xf4, 0xae, 0xaa, 0xc6, 0xf7, - 0xfa, 0xd0, 0x08, 0xd0, 0x60, 0xa6, 0xcf, 0x87, 0x06, - 0x20, 0x38, 0xd6, 0x16, 0x67, 0x74, } - }, - { .min_version = 0x65, - .test[1].outlen = 28, .test[1].data = { 0xff, 0xfb, 0x43, 0x27, - 0xdd, 0x2e, 0x39, 0xa0, 0x18, 0xa8, 0xaf, 0xde, 0x84, - 0x0b, 0x5d, 0x0f, 0x3d, 0xdc, 0xc6, 0x17, 0xd1, 0xb6, - 0x2f, 
0x8c, 0xf8, 0x7e, 0x34, 0x34, }, - .test[4].outlen = 28, .test[4].data = { 0x00, 0x19, 0xe2, 0x2d, - 0x44, 0x80, 0x2d, 0xd8, 0x1c, 0x57, 0xf5, 0x57, 0x92, - 0x08, 0x13, 0xe7, 0x9d, 0xbb, 0x2b, 0xc2, 0x8d, 0x77, - 0xc1, 0xff, 0x71, 0x4c, 0xf0, 0xa9, }, - .test[5].outlen = 28, .test[5].data = { 0x6a, 0xc4, 0xa8, 0x73, - 0x21, 0x54, 0xb2, 0x82, 0xee, 0x89, 0x8d, 0x45, 0xd4, - 0xe3, 0x76, 0x3e, 0x04, 0x03, 0xc9, 0x71, 0xee, 0x01, - 0x25, 0xd2, 0x7b, 0xa1, 0x20, 0xc4, }, - .test[6].outlen = 28, .test[6].data = { 0x0f, 0x98, 0x15, 0x9b, - 0x11, 0xca, 0x60, 0xc7, 0x82, 0x39, 0x1a, 0x50, 0x8c, - 0xe4, 0x79, 0xfa, 0xa8, 0x0e, 0xc7, 0x12, 0xfd, 0x8c, - 0x9c, 0x99, 0x7a, 0xe8, 0x7e, 0x92, }, - }, - /* SHA512_256_HASH*/ - { .min_version = 0x65, - .test[1].outlen = 32, .test[1].data = { 0xaf, 0x13, 0xc0, 0x48, - 0x99, 0x12, 0x24, 0xa5, 0xe4, 0xc6, 0x64, 0x44, 0x6b, - 0x68, 0x8a, 0xaf, 0x48, 0xfb, 0x54, 0x56, 0xdb, 0x36, - 0x29, 0x60, 0x1b, 0x00, 0xec, 0x16, 0x0c, 0x74, 0xe5, - 0x54, } - }, - { .min_version = 0x65, - .test[1].outlen = 32, .test[1].data = { 0x3a, 0x2c, 0xd0, 0x2b, - 0xfa, 0xa6, 0x72, 0xe4, 0xf1, 0xab, 0x0a, 0x3e, 0x70, - 0xe4, 0x88, 0x1a, 0x92, 0xe1, 0x3b, 0x64, 0x5a, 0x9b, - 0xed, 0xb3, 0x97, 0xc0, 0x17, 0x1f, 0xd4, 0x05, 0xf1, - 0x72, }, - .test[4].outlen = 32, .test[4].data = { 0x6f, 0x2d, 0xae, 0xc6, - 0xe4, 0xa6, 0x5b, 0x52, 0x0f, 0x26, 0x16, 0xf6, 0xa9, - 0xc1, 0x23, 0xc2, 0xb3, 0x67, 0xfc, 0x69, 0xac, 0x73, - 0x87, 0xa2, 0x5b, 0x6c, 0x44, 0xad, 0xc5, 0x26, 0x2b, - 0x10, }, - .test[5].outlen = 32, .test[5].data = { 0x63, 0xe7, 0xb8, 0xd1, - 0x76, 0x33, 0x56, 0x29, 0xba, 0x99, 0x86, 0x42, 0x0d, - 0x4f, 0xf7, 0x54, 0x8c, 0xb9, 0x39, 0xf2, 0x72, 0x1d, - 0x0e, 0x9d, 0x80, 0x67, 0xd9, 0xab, 0x15, 0xb0, 0x68, - 0x18, }, - .test[6].outlen = 32, .test[6].data = { 0x64, 0x78, 0x56, 0xd7, - 0xaf, 0x5b, 0x56, 0x08, 0xf1, 0x44, 0xf7, 0x4f, 0xa1, - 0xa1, 0x13, 0x79, 0x6c, 0xb1, 0x31, 0x11, 0xf3, 0x75, - 0xf4, 0x8c, 0xb4, 0x9f, 0xbf, 0xb1, 0x60, 0x38, 0x3d, - 0x28, }, - 
}, - - /* AESXCBC*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0x35, 0xd9, 0xdc, 0xdb, - 0x82, 0x9f, 0xec, 0x33, 0x52, 0xe7, 0xbf, 0x10, 0xb8, - 0x4b, 0xe4, 0xa5, }, - .test[3].outlen = 16, .test[3].data = { 0x39, 0x6f, 0x99, 0xb5, - 0x43, 0x33, 0x67, 0x4e, 0xd4, 0x45, 0x8f, 0x80, 0x77, - 0xe4, 0xd4, 0x14, }, - .test[4].outlen = 16, .test[4].data = { 0x73, 0xd4, 0x7c, 0x38, - 0x37, 0x4f, 0x73, 0xd0, 0x78, 0xa8, 0xc6, 0xec, 0x05, - 0x67, 0xca, 0x5e, }, - }, - - /* AESCMAC*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0x15, 0xbe, 0x1b, 0xfd, - 0x8c, 0xbb, 0xaf, 0x8b, 0x51, 0x9a, 0x64, 0x3b, 0x1b, - 0x46, 0xc1, 0x8f, }, - .test[3].outlen = 16, .test[3].data = { 0x4e, 0x02, 0xd6, 0xec, - 0x92, 0x75, 0x88, 0xb4, 0x3e, 0x83, 0xa7, 0xac, 0x32, - 0xb6, 0x2b, 0xdb, }, - .test[4].outlen = 16, .test[4].data = { 0xa7, 0x37, 0x01, 0xbe, - 0xe8, 0xce, 0xed, 0x44, 0x49, 0x4a, 0xbb, 0xf6, 0x9e, - 0xd9, 0x31, 0x3e, }, - }, - - /* KASUMIF9*/ - { .min_version = 0x65, - .test[1].outlen = 4, .test[1].data = { 0x5b, 0x26, 0x81, 0x06 - } - }, - - /* SNOW3G UIA2*/ - { .min_version = 0x65, - .test[1].outlen = 4, .test[1].data = { 0x08, 0xed, 0x2c, 0x76, - } - }, - - /* ZUC UIA3*/ - { .min_version = 0x65, - .test[1].outlen = 4, .test[1].data = { 0x6a, 0x2b, 0x4c, 0x3a, - } - }, - - /* POLY1305*/ - { .min_version = 0x65, - .test[4].outlen = 16, .test[4].data = { 0xef, 0x91, 0x06, 0x4e, - 0xce, 0x99, 0x9c, 0x4e, 0xfd, 0x05, 0x6a, 0x8c, 0xe6, - 0x18, 0x23, 0xad } - }, - - /* SSLMAC MD5*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0x0e, 0xf4, 0xca, 0x32, - 0x32, 0x40, 0x1d, 0x1b, 0xaa, 0xfd, 0x6d, 0xa8, 0x01, - 0x79, 0xed, 0xcd, }, - }, - - /* SSLMAC_SHA1*/ - { .min_version = 0x65, - .test[2].outlen = 20, .test[2].data = { 0x05, 0x9d, 0x99, 0xb4, - 0xf3, 0x03, 0x1e, 0xc5, 0x24, 0xbf, 0xec, 0xdf, 0x64, - 0x8e, 0x37, 0x2e, 0xf0, 0xef, 0x93, 0xa0, }, - }, - - /* CRC32*/ - { .min_version = 0x65, - .test[0].outlen = 0 - }, - - /* 
TKIP-MIC*/ - { .min_version = 0x65, - .test[0].outlen = 8, .test[0].data = { 0x16, 0xfb, 0xa0, - 0x0e, 0xe2, 0xab, 0x6c, 0x97, } - }, - - /* SHA3-224*/ - { .min_version = 0x65, - .test[1].outlen = 28, .test[1].data = { 0x73, 0xe0, 0x87, - 0xae, 0x12, 0x71, 0xb2, 0xc5, 0xf6, 0x85, 0x46, 0xc9, - 0x3a, 0xb4, 0x25, 0x14, 0xa6, 0x9e, 0xef, 0x25, 0x2b, - 0xfd, 0xd1, 0x37, 0x55, 0x74, 0x8a, 0x00, } - }, - - /* SHA3-256*/ - { .min_version = 0x65, - .test[1].outlen = 32, .test[1].data = { 0x9e, 0x62, 0x91, 0x97, - 0x0c, 0xb4, 0x4d, 0xd9, 0x40, 0x08, 0xc7, 0x9b, 0xca, - 0xf9, 0xd8, 0x6f, 0x18, 0xb4, 0xb4, 0x9b, 0xa5, 0xb2, - 0xa0, 0x47, 0x81, 0xdb, 0x71, 0x99, 0xed, 0x3b, 0x9e, - 0x4e, } - }, - - /* SHA3-384*/ - { .min_version = 0x65, - .test[1].outlen = 48, .test[1].data = { 0x4b, 0xda, 0xab, - 0xf7, 0x88, 0xd3, 0xad, 0x1a, 0xd8, 0x3d, 0x6d, 0x93, - 0xc7, 0xe4, 0x49, 0x37, 0xc2, 0xe6, 0x49, 0x6a, 0xf2, - 0x3b, 0xe3, 0x35, 0x4d, 0x75, 0x69, 0x87, 0xf4, 0x51, - 0x60, 0xfc, 0x40, 0x23, 0xbd, 0xa9, 0x5e, 0xcd, 0xcb, - 0x3c, 0x7e, 0x31, 0xa6, 0x2f, 0x72, 0x6d, 0x70, 0x2c, - } - }, - - /* SHA3-512*/ - { .min_version = 0x65, - .test[1].outlen = 64, .test[1].data = { 0xad, 0x56, 0xc3, 0x5c, - 0xab, 0x50, 0x63, 0xb9, 0xe7, 0xea, 0x56, 0x83, 0x14, - 0xec, 0x81, 0xc4, 0x0b, 0xa5, 0x77, 0xaa, 0xe6, 0x30, - 0xde, 0x90, 0x20, 0x04, 0x00, 0x9e, 0x88, 0xf1, 0x8d, - 0xa5, 0x7b, 0xbd, 0xfd, 0xaa, 0xa0, 0xfc, 0x18, 0x9c, - 0x66, 0xc8, 0xd8, 0x53, 0x24, 0x8b, 0x6b, 0x11, 0x88, - 0x44, 0xd5, 0x3f, 0x7d, 0x0b, 0xa1, 0x1d, 0xe0, 0xf3, - 0xbf, 0xaf, 0x4c, 0xdd, 0x9b, 0x3f, } - }, - - /* SHAKE128*/ - { .min_version = 0x65, - .test[4].outlen = 16, .test[4].data = { 0x24, 0xa7, 0xca, - 0x4b, 0x75, 0xe3, 0x89, 0x8d, 0x4f, 0x12, 0xe7, 0x4d, - 0xea, 0x8c, 0xbb, 0x65 } - }, - - /* SHAKE256*/ - { .min_version = 0x65, - .test[4].outlen = 32, .test[4].data = { 0xf5, 0x97, 0x7c, - 0x82, 0x83, 0x54, 0x6a, 0x63, 0x72, 0x3b, 0xc3, 0x1d, - 0x26, 0x19, 0x12, 0x4f, - 0x11, 0xdb, 0x46, 0x58, 0x64, 0x33, 0x36, 0x74, 
0x1d, - 0xf8, 0x17, 0x57, 0xd5, 0xad, 0x30, 0x62 } - }, - - /* CSHAKE128*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0xe0, 0x6f, 0xd8, - 0x50, 0x57, 0x6f, 0xe4, 0xfa, 0x7e, 0x13, 0x42, 0xb5, - 0xf8, 0x13, 0xeb, 0x23 } - }, - - /* CSHAKE256*/ - { .min_version = 0x65, - .test[1].outlen = 32, .test[1].data = { 0xf3, 0xf2, 0xb5, - 0x47, 0xf2, 0x16, 0xba, 0x6f, 0x49, 0x83, 0x3e, 0xad, - 0x1e, 0x46, 0x85, 0x54, - 0xd0, 0xd7, 0xf9, 0xc6, 0x7e, 0xe9, 0x27, 0xc6, 0xc3, - 0xc3, 0xdb, 0x91, 0xdb, 0x97, 0x04, 0x0f } - }, - - /* KMAC128*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0x6c, 0x3f, 0x29, - 0xfe, 0x01, 0x96, 0x59, 0x36, 0xb7, 0xae, 0xb7, 0xff, - 0x71, 0xe0, 0x3d, 0xff }, - .test[4].outlen = 16, .test[4].data = { 0x58, 0xd9, 0x8d, - 0xe8, 0x1f, 0x64, 0xb4, 0xa3, 0x9f, 0x63, 0xaf, 0x21, - 0x99, 0x03, 0x97, 0x06 }, - .test[5].outlen = 16, .test[5].data = { 0xf8, 0xf9, 0xb7, - 0xa4, 0x05, 0x3d, 0x90, 0x7c, 0xf2, 0xa1, 0x7c, 0x34, - 0x39, 0xc2, 0x87, 0x4b }, - .test[6].outlen = 16, .test[6].data = { 0xef, 0x4a, 0xd5, - 0x1d, 0xd7, 0x83, 0x56, 0xd3, 0xa8, 0x3c, 0xf5, 0xf8, - 0xd1, 0x12, 0xf4, 0x44 } - }, - - /* KMAC256*/ - { .min_version = 0x65, - .test[1].outlen = 32, .test[1].data = { 0x0d, 0x86, 0xfa, - 0x92, 0x92, 0xe4, 0x77, 0x24, 0x6a, 0xcc, 0x79, 0xa0, - 0x1e, 0xb4, 0xc3, 0xac, - 0xfc, 0x56, 0xbc, 0x63, 0xcc, 0x1b, 0x6e, 0xf6, 0xc8, - 0x99, 0xa5, 0x3a, 0x38, 0x14, 0xa2, 0x40 }, - .test[4].outlen = 32, .test[4].data = { 0xad, 0x99, 0xed, - 0x20, 0x1f, 0xbe, 0x45, 0x07, 0x3d, 0xf4, 0xae, 0x9f, - 0xc2, 0xd8, 0x06, 0x18, - 0x31, 0x4e, 0x8c, 0xb6, 0x33, 0xe8, 0x31, 0x36, 0x00, - 0xdd, 0x42, 0x20, 0xda, 0x2b, 0xd5, 0x2b }, - .test[5].outlen = 32, .test[5].data = { 0xf9, 0xc6, 0x2b, - 0x17, 0xa0, 0x04, 0xd9, 0xf2, 0x6c, 0xbf, 0x5d, 0xa5, - 0x9a, 0xd7, 0x36, 0x1d, - 0xad, 0x66, 0x6b, 0x3d, 0xb1, 0x52, 0xd3, 0x81, 0x39, - 0x20, 0xd4, 0xf0, 0x43, 0x72, 0x2c, 0xb7 }, - .test[6].outlen = 32, .test[6].data = { 0xcc, 0x89, 0xe4, - 
0x05, 0x58, 0x77, 0x38, 0x8b, 0x18, 0xa0, 0x7c, 0x8d, - 0x20, 0x99, 0xea, 0x6e, - 0x6b, 0xe9, 0xf7, 0x0c, 0xe1, 0xe5, 0xce, 0xbc, 0x55, - 0x4c, 0x80, 0xa5, 0xdc, 0xae, 0xf7, 0x94 } - }, - - /* KMAC128XOF*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0x84, 0x07, 0x89, - 0x29, 0xa7, 0xf4, 0x98, 0x91, 0xf5, 0x64, 0x61, 0x8d, - 0xa5, 0x93, 0x00, 0x31 }, - .test[4].outlen = 16, .test[4].data = { 0xf0, 0xa4, 0x1b, - 0x98, 0x0f, 0xb3, 0xf2, 0xbd, 0xc3, 0xfc, 0x64, 0x1f, - 0x73, 0x1f, 0xd4, 0x74 }, - .test[5].outlen = 16, .test[5].data = { 0xa5, 0xc5, 0xad, - 0x25, 0x59, 0xf1, 0x5d, 0xea, 0x5b, 0x18, 0x0a, 0x52, - 0xce, 0x6c, 0xc0, 0x88 }, - .test[6].outlen = 16, .test[6].data = { 0x1a, 0x81, 0xdd, - 0x81, 0x47, 0x89, 0xf4, 0x15, 0xcc, 0x18, 0x05, 0x81, - 0xe3, 0x95, 0x21, 0xc3 } - }, - - /* KMAC256XOF*/ - { .min_version = 0x65, - .test[1].outlen = 32, .test[1].data = { 0xff, 0x85, 0xe9, - 0x61, 0x67, 0x96, 0x35, 0x58, 0x33, 0x38, 0x2c, 0xe8, - 0x25, 0x77, 0xbe, 0x63, - 0xd5, 0x2c, 0xa7, 0xef, 0xce, 0x9b, 0x63, 0x71, 0xb2, - 0x09, 0x7c, 0xd8, 0x60, 0x4e, 0x5a, 0xfa }, - .test[4].outlen = 32, .test[4].data = { 0x86, 0x89, 0xc2, - 0x4a, 0xe8, 0x18, 0x46, 0x10, 0x6b, 0xf2, 0x09, 0xd7, - 0x37, 0x83, 0xab, 0x77, - 0xb5, 0xce, 0x7c, 0x96, 0x9c, 0xfa, 0x0f, 0xa0, 0xd8, - 0xde, 0xb5, 0xb7, 0xc6, 0xcd, 0xa9, 0x8f }, - .test[5].outlen = 32, .test[5].data = { 0x4d, 0x71, 0x81, - 0x5a, 0x5f, 0xac, 0x3b, 0x29, 0xf2, 0x5f, 0xb6, 0x56, - 0xf1, 0x76, 0xcf, 0xdc, - 0x51, 0x56, 0xd7, 0x3c, 0x47, 0xec, 0x6d, 0xea, 0xc6, - 0x3e, 0x54, 0xe7, 0x6f, 0xdc, 0xe8, 0x39 }, - .test[6].outlen = 32, .test[6].data = { 0x5f, 0xc5, 0xe1, - 0x1e, 0xe7, 0x55, 0x0f, 0x62, 0x71, 0x29, 0xf3, 0x0a, - 0xb3, 0x30, 0x68, 0x06, - 0xea, 0xec, 0xe4, 0x37, 0x17, 0x37, 0x2d, 0x5d, 0x64, - 0x09, 0x70, 0x63, 0x94, 0x80, 0x9b, 0x80 } - }, - - /* HASH SM3*/ - { .min_version = 0x65, - .test[1].outlen = 32, .test[1].data = { 0xe0, 0xba, 0xb8, - 0xf4, 0xd8, 0x17, 0x2b, 0xa2, 0x45, 0x19, 0x0d, 0x13, - 
0xc9, 0x41, 0x17, 0xe9, - 0x3b, 0x82, 0x16, 0x6c, 0x25, 0xb2, 0xb6, 0x98, 0x83, - 0x35, 0x0c, 0x19, 0x2c, 0x90, 0x51, 0x40 }, - .test[4].outlen = 32, .test[4].data = { 0xe0, 0xba, 0xb8, - 0xf4, 0xd8, 0x17, 0x2b, 0xa2, 0x45, 0x19, 0x0d, 0x13, - 0xc9, 0x41, 0x17, 0xe9, - 0x3b, 0x82, 0x16, 0x6c, 0x25, 0xb2, 0xb6, 0x98, 0x83, - 0x35, 0x0c, 0x19, 0x2c, 0x90, 0x51, 0x40 }, - .test[5].outlen = 32, .test[5].data = { 0xe0, 0xba, 0xb8, - 0xf4, 0xd8, 0x17, 0x2b, 0xa2, 0x45, 0x19, 0x0d, 0x13, - 0xc9, 0x41, 0x17, 0xe9, - 0x3b, 0x82, 0x16, 0x6c, 0x25, 0xb2, 0xb6, 0x98, 0x83, - 0x35, 0x0c, 0x19, 0x2c, 0x90, 0x51, 0x40 }, - .test[6].outlen = 32, .test[6].data = { 0xe0, 0xba, 0xb8, - 0xf4, 0xd8, 0x17, 0x2b, 0xa2, 0x45, 0x19, 0x0d, 0x13, - 0xc9, 0x41, 0x17, 0xe9, - 0x3b, 0x82, 0x16, 0x6c, 0x25, 0xb2, 0xb6, 0x98, 0x83, - 0x35, 0x0c, 0x19, 0x2c, 0x90, 0x51, 0x40 } - }, - - /* HMAC SM3*/ - { .min_version = 0x65, - .test[1].outlen = 32, .test[1].data = { 0x68, 0xf0, 0x65, - 0xd8, 0xd8, 0xc9, 0xc2, 0x0e, 0x10, 0xfd, 0x52, 0x7c, - 0xf2, 0xd7, 0x42, 0xd3, - 0x08, 0x44, 0x22, 0xbc, 0xf0, 0x9d, 0xcc, 0x34, 0x7b, - 0x76, 0x13, 0x91, 0xba, 0xce, 0x4d, 0x17 }, - .test[4].outlen = 32, .test[4].data = { 0xd8, 0xab, 0x2a, - 0x7b, 0x56, 0x21, 0xb1, 0x59, 0x64, 0xb2, 0xa3, 0xd6, - 0x72, 0xb3, 0x95, 0x81, - 0xa0, 0xcd, 0x96, 0x47, 0xf0, 0xbc, 0x8c, 0x16, 0x5b, - 0x9b, 0x7d, 0x2f, 0x71, 0x3f, 0x23, 0x19}, - .test[5].outlen = 32, .test[5].data = { 0xa0, 0xd1, 0xd5, - 0xa0, 0x9e, 0x4c, 0xca, 0x8c, 0x7b, 0xe0, 0x8f, 0x70, - 0x92, 0x2e, 0x3f, 0x4c, - 0xa0, 0xca, 0xef, 0xa1, 0x86, 0x9d, 0xb2, 0xe1, 0xc5, - 0xfa, 0x9d, 0xfa, 0xbc, 0x11, 0xcb, 0x1f }, - .test[6].outlen = 32, .test[6].data = { 0xa0, 0xd1, 0xd5, - 0xa0, 0x9e, 0x4c, 0xca, 0x8c, 0x7b, 0xe0, 0x8f, 0x70, - 0x92, 0x2e, 0x3f, 0x4c, - 0xa0, 0xca, 0xef, 0xa1, 0x86, 0x9d, 0xb2, 0xe1, 0xc5, - 0xfa, 0x9d, 0xfa, 0xbc, 0x11, 0xcb, 0x1f} - }, - - /* MAC_SM4_XCBC*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0x69, 0xaf, 0x45, - 0xe6, 
0x0c, 0x78, 0x71, 0x7e, 0x44, 0x6c, 0xfe, 0x68, - 0xd4, 0xfe, 0x20, 0x8b }, - .test[4].outlen = 16, .test[4].data = { 0x69, 0xaf, 0x45, - 0xe6, 0x0c, 0x78, 0x71, 0x7e, 0x44, 0x6c, 0xfe, 0x68, - 0xd4, 0xfe, 0x20, 0x8b }, - .test[5].outlen = 16, .test[5].data = { 0x69, 0xaf, 0x45, - 0xe6, 0x0c, 0x78, 0x71, 0x7e, 0x44, 0x6c, 0xfe, 0x68, - 0xd4, 0xfe, 0x20, 0x8b }, - .test[6].outlen = 16, .test[6].data = { 0x69, 0xaf, 0x45, - 0xe6, 0x0c, 0x78, 0x71, 0x7e, 0x44, 0x6c, 0xfe, 0x68, - 0xd4, 0xfe, 0x20, 0x8b } - }, - - /* MAC_SM4_CMAC*/ - { .min_version = 0x65, - .test[1].outlen = 16, .test[1].data = { 0x36, 0xbe, 0xec, - 0x03, 0x9c, 0xc7, 0x0c, 0x28, 0x23, 0xdd, 0x71, 0x8b, - 0x3c, 0xbd, 0x7f, 0x37 }, - .test[4].outlen = 16, .test[4].data = { 0x36, 0xbe, 0xec, - 0x03, 0x9c, 0xc7, 0x0c, 0x28, 0x23, 0xdd, 0x71, 0x8b, - 0x3c, 0xbd, 0x7f, 0x37 }, - .test[5].outlen = 16, .test[5].data = { 0x36, 0xbe, 0xec, - 0x03, 0x9c, 0xc7, 0x0c, 0x28, 0x23, 0xdd, 0x71, 0x8b, - 0x3c, 0xbd, 0x7f, 0x37 }, - .test[6].outlen = 16, .test[6].data = { 0x36, 0xbe, 0xec, - 0x03, 0x9c, 0xc7, 0x0c, 0x28, 0x23, 0xdd, 0x71, 0x8b, - 0x3c, 0xbd, 0x7f, 0x37 } - }, - -}; -#endif - -int spacc_sg_to_ddt(struct device *dev, struct scatterlist *sg, - int nbytes, struct pdu_ddt *ddt, int dma_direction) -{ - struct scatterlist *sg_entry, *sgl; - int nents, orig_nents; - int i, rc; - - orig_nents = sg_nents(sg); - if (orig_nents > 1) { - sgl = sg_last(sg, orig_nents); - if (sgl->length == 0) - orig_nents--; - } - nents = dma_map_sg(dev, sg, orig_nents, dma_direction); - - if (nents <= 0) - return -ENOMEM; - - /* require ATOMIC operations */ - rc = pdu_ddt_init(ddt, nents | 0x80000000); - if (rc < 0) { - dma_unmap_sg(dev, sg, nents, dma_direction); - return -EIO; - } - - for_each_sg(sg, sg_entry, nents, i) { - pdu_ddt_add(ddt, sg_dma_address(sg_entry), - sg_dma_len(sg_entry)); - } - - dma_sync_sg_for_device(dev, sg, nents, dma_direction); - - return nents; -} - -int spacc_set_operation(struct spacc_device *spacc, int 
handle, int op, - u32 prot, uint32_t icvcmd, uint32_t icvoff, - uint32_t icvsz, uint32_t sec_key) -{ - int ret = CRYPTO_OK; - struct spacc_job *job = NULL; - - if (handle < 0 || handle >= SPACC_MAX_JOBS) - return -ENXIO; - - job = &spacc->job[handle]; - if (!job) - return -EIO; - - job->op = op; - if (op == OP_ENCRYPT) - job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ENCRYPT); - else - job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_ENCRYPT); - - switch (prot) { - case ICV_HASH: /* HASH of plaintext */ - job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ICV_PT); - break; - case ICV_HASH_ENCRYPT: - /* HASH the plaintext and encrypt the lot */ - /* ICV_PT and ICV_APPEND must be set too */ - job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ICV_ENC); - job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ICV_PT); - /* This mode is not valid when BIT_ALIGN != 0 */ - job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ICV_APPEND); - break; - case ICV_ENCRYPT_HASH: /* HASH the ciphertext */ - job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_ICV_PT); - job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_ICV_ENC); - break; - case ICV_IGNORE: - break; - default: - ret = -EINVAL; - } - - job->icv_len = icvsz; - - switch (icvcmd) { - case IP_ICV_OFFSET: - job->icv_offset = icvoff; - job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_ICV_APPEND); - break; - case IP_ICV_APPEND: - job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_ICV_APPEND); - break; - case IP_ICV_IGNORE: - break; - default: - ret = -EINVAL; - } - - if (sec_key) - job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_SEC_KEY); - - return ret; -} - -static int _spacc_fifo_full(struct spacc_device *spacc, uint32_t prio) -{ - if (spacc->config.is_qos) - return readl(spacc->regmap + SPACC_REG_FIFO_STAT) & - SPACC_FIFO_STAT_CMDX_FULL(prio); - else - return readl(spacc->regmap + SPACC_REG_FIFO_STAT) & - SPACC_FIFO_STAT_CMD0_FULL; -} - -/* When proc_sz != 0 it overrides the ddt_len value - * defined in the context referenced by 'job_idx' - */ -int spacc_packet_enqueue_ddt_ex(struct spacc_device *spacc, int use_jb, - int 
job_idx, struct pdu_ddt *src_ddt, - struct pdu_ddt *dst_ddt, u32 proc_sz, - uint32_t aad_offset, uint32_t pre_aad_sz, - u32 post_aad_sz, uint32_t iv_offset, - uint32_t prio) -{ - int i; - struct spacc_job *job; - int proc_len; - - if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) - return -ENXIO; - - switch (prio) { - case SPACC_SW_CTRL_PRIO_MED: - if (spacc->config.cmd1_fifo_depth == 0) - return -EINVAL; - break; - case SPACC_SW_CTRL_PRIO_LOW: - if (spacc->config.cmd2_fifo_depth == 0) - return -EINVAL; - break; - } - - job = &spacc->job[job_idx]; - if (!job) - return -EIO; - - /* process any jobs in the jb*/ - if (use_jb && spacc_process_jb(spacc) != 0) - goto fifo_full; - - if (_spacc_fifo_full(spacc, prio)) { - if (use_jb) - goto fifo_full; - else - return -EBUSY; - } - - /* compute the length we must process, in decrypt mode - * with an ICV (hash, hmac or CCM modes) - * we must subtract the icv length from the buffer size - */ - if (proc_sz == SPACC_AUTO_SIZE) { - proc_len = src_ddt->len; - - if (job->op == OP_DECRYPT && - (job->hash_mode > 0 || - job->enc_mode == CRYPTO_MODE_AES_CCM || - job->enc_mode == CRYPTO_MODE_AES_GCM) && - !(job->ctrl & SPACC_CTRL_MASK(SPACC_CTRL_ICV_ENC))) - proc_len = src_ddt->len - job->icv_len; - } else { - proc_len = proc_sz; - } - - if (pre_aad_sz & SPACC_AADCOPY_FLAG) { - job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_AAD_COPY); - pre_aad_sz &= ~(SPACC_AADCOPY_FLAG); - } else { - job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_AAD_COPY); - } - - job->pre_aad_sz = pre_aad_sz; - job->post_aad_sz = post_aad_sz; - - if (spacc->config.dma_type == SPACC_DMA_DDT) { - pdu_io_cached_write(spacc->regmap + SPACC_REG_SRC_PTR, - (uint32_t)src_ddt->phys, - &spacc->cache.src_ptr); - pdu_io_cached_write(spacc->regmap + SPACC_REG_DST_PTR, - (uint32_t)dst_ddt->phys, - &spacc->cache.dst_ptr); - } else if (spacc->config.dma_type == SPACC_DMA_LINEAR) { - pdu_io_cached_write(spacc->regmap + SPACC_REG_SRC_PTR, - (uint32_t)src_ddt->virt[0], - &spacc->cache.src_ptr); - 
pdu_io_cached_write(spacc->regmap + SPACC_REG_DST_PTR, - (uint32_t)dst_ddt->virt[0], - &spacc->cache.dst_ptr); - } else { - return -EIO; - } - - pdu_io_cached_write(spacc->regmap + SPACC_REG_PROC_LEN, - proc_len - job->post_aad_sz, - &spacc->cache.proc_len); - pdu_io_cached_write(spacc->regmap + SPACC_REG_ICV_LEN, - job->icv_len, &spacc->cache.icv_len); - pdu_io_cached_write(spacc->regmap + SPACC_REG_ICV_OFFSET, - job->icv_offset, &spacc->cache.icv_offset); - pdu_io_cached_write(spacc->regmap + SPACC_REG_PRE_AAD_LEN, - job->pre_aad_sz, &spacc->cache.pre_aad); - pdu_io_cached_write(spacc->regmap + SPACC_REG_POST_AAD_LEN, - job->post_aad_sz, &spacc->cache.post_aad); - pdu_io_cached_write(spacc->regmap + SPACC_REG_IV_OFFSET, - iv_offset, &spacc->cache.iv_offset); - pdu_io_cached_write(spacc->regmap + SPACC_REG_OFFSET, - aad_offset, &spacc->cache.offset); - pdu_io_cached_write(spacc->regmap + SPACC_REG_AUX_INFO, - AUX_DIR(job->auxinfo_dir) | - AUX_BIT_ALIGN(job->auxinfo_bit_align) | - AUX_CBC_CS(job->auxinfo_cs_mode), - &spacc->cache.aux); - - if (job->first_use == 1) { - writel(job->ckey_sz | SPACC_SET_KEY_CTX(job->ctx_idx), - spacc->regmap + SPACC_REG_KEY_SZ); - writel(job->hkey_sz | SPACC_SET_KEY_CTX(job->ctx_idx), - spacc->regmap + SPACC_REG_KEY_SZ); - } - - job->job_swid = spacc->job_next_swid; - spacc->job_lookup[job->job_swid] = job_idx; - spacc->job_next_swid = - (spacc->job_next_swid + 1) % SPACC_MAX_JOBS; - writel(SPACC_SW_CTRL_ID_SET(job->job_swid) | - SPACC_SW_CTRL_PRIO_SET(prio), - spacc->regmap + SPACC_REG_SW_CTRL); - writel(job->ctrl, spacc->regmap + SPACC_REG_CTRL); - - /* Clear an expansion key after the first call*/ - if (job->first_use == 1) { - job->first_use = 0; - job->ctrl &= ~SPACC_CTRL_MASK(SPACC_CTRL_KEY_EXP); - } - - return CRYPTO_OK; - -fifo_full: - /* try to add a job to the job buffers*/ - i = spacc->jb_head + 1; - if (i == SPACC_MAX_JOB_BUFFERS) - i = 0; - - if (i == spacc->jb_tail) - return -EBUSY; - - spacc->job_buffer[spacc->jb_head] = 
(struct spacc_job_buffer) { - .active = 1, - .job_idx = job_idx, - .src = src_ddt, - .dst = dst_ddt, - .proc_sz = proc_sz, - .aad_offset = aad_offset, - .pre_aad_sz = pre_aad_sz, - .post_aad_sz = post_aad_sz, - .iv_offset = iv_offset, - .prio = prio - }; - - spacc->jb_head = i; - - return CRYPTO_OK; -} - -int spacc_packet_enqueue_ddt(struct spacc_device *spacc, int job_idx, - struct pdu_ddt *src_ddt, struct pdu_ddt *dst_ddt, - u32 proc_sz, u32 aad_offset, uint32_t pre_aad_sz, - uint32_t post_aad_sz, u32 iv_offset, uint32_t prio) -{ - int ret; - unsigned long lock_flags; - - spin_lock_irqsave(&spacc->lock, lock_flags); - ret = spacc_packet_enqueue_ddt_ex(spacc, 1, job_idx, src_ddt, - dst_ddt, proc_sz, aad_offset, - pre_aad_sz, post_aad_sz, - iv_offset, prio); - spin_unlock_irqrestore(&spacc->lock, lock_flags); - - return ret; -} - -static int spacc_packet_dequeue(struct spacc_device *spacc, int job_idx) -{ - int ret = CRYPTO_OK; - struct spacc_job *job = &spacc->job[job_idx]; - unsigned long lock_flag; - - spin_lock_irqsave(&spacc->lock, lock_flag); - - if (!job && !(job_idx == SPACC_JOB_IDX_UNUSED)) { - ret = -EIO; - } else if (job->job_done) { - job->job_done = 0; - ret = job->job_err; - } else { - ret = -EINPROGRESS; - } - - spin_unlock_irqrestore(&spacc->lock, lock_flag); - - return ret; -} - -int spacc_isenabled(struct spacc_device *spacc, int mode, int keysize) -{ - int x; - - if (mode < 0 || mode >= CRYPTO_MODE_LAST) - return 0; - - if (mode == CRYPTO_MODE_NULL || - mode == CRYPTO_MODE_AES_XTS || - mode == CRYPTO_MODE_SM4_XTS || - mode == CRYPTO_MODE_AES_F8 || - mode == CRYPTO_MODE_SM4_F8 || - spacc->config.modes[mode] & 128) - return 1; - - for (x = 0; x < 6; x++) { - if (keysizes[0][x] == keysize) { - if (spacc->config.modes[mode] & (1 << x)) - return 1; - else - return 0; - } - } - - return 0; -} - -/* Releases a crypto context back into appropriate module's pool*/ -int spacc_close(struct spacc_device *dev, int handle) -{ - return spacc_job_release(dev, 
handle); -} - -#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AUTODETECT) -static int spacc_set_auxinfo(struct spacc_device *spacc, int jobid, - uint32_t direction, uint32_t bitsize) -{ - int ret = CRYPTO_OK; - struct spacc_job *job; - - if (jobid < 0 || jobid >= SPACC_MAX_JOBS) - return -ENXIO; - - job = &spacc->job[jobid]; - if (!job) { - ret = -EIO; - } else { - job->auxinfo_dir = direction; - job->auxinfo_bit_align = bitsize; - } - - return ret; -} - -static void check_modes(struct spacc_device *spacc, int x, int y, void *virt, - char *key, struct pdu_ddt *ddt) -{ - int proclen, aadlen, ivsize, h, err, enc, hash; - - if (template[x] & (1 << y)) { - /* testing keysizes[y] with algo 'x' which - * should match the ENUMs above - */ - - if (template[x] & 128) { - enc = 0; - hash = x; - } else { - enc = x; - hash = 0; - } - - h = spacc_open(spacc, enc, hash, -1, 0, NULL, NULL); - if (h < 0) { - spacc->config.modes[x] &= ~(1 << y); - return; - } - - spacc_set_operation(spacc, h, OP_ENCRYPT, 0, IP_ICV_APPEND, 0, - 0, 0); - - /* if this is a hash or mac*/ - if (template[x] & 128) { - switch (x) { - case CRYPTO_MODE_HASH_CSHAKE128: - case CRYPTO_MODE_HASH_CSHAKE256: - case CRYPTO_MODE_MAC_KMAC128: - case CRYPTO_MODE_MAC_KMAC256: - case CRYPTO_MODE_MAC_KMACXOF128: - case CRYPTO_MODE_MAC_KMACXOF256: - /* special initial bytes to encode - * length for cust strings - */ - key[0] = 0x01; - key[1] = 0x70; - break; - } - - spacc_write_context(spacc, h, SPACC_HASH_OPERATION, - key, keysizes[1][y] + - (x == CRYPTO_MODE_MAC_XCBC ? 
32 : 0), - key, 16); - } else { - u32 keysize; - - ivsize = 16; - keysize = keysizes[0][y]; - switch (x) { - case CRYPTO_MODE_CHACHA20_STREAM: - case CRYPTO_MODE_AES_CCM: - case CRYPTO_MODE_SM4_CCM: - ivsize = 16; - break; - case CRYPTO_MODE_SM4_GCM: - case CRYPTO_MODE_CHACHA20_POLY1305: - case CRYPTO_MODE_AES_GCM: - ivsize = 12; - break; - case CRYPTO_MODE_KASUMI_ECB: - case CRYPTO_MODE_KASUMI_F8: - case CRYPTO_MODE_3DES_CBC: - case CRYPTO_MODE_3DES_ECB: - case CRYPTO_MODE_DES_CBC: - case CRYPTO_MODE_DES_ECB: - ivsize = 8; - break; - case CRYPTO_MODE_SM4_XTS: - case CRYPTO_MODE_AES_XTS: - keysize <<= 1; - break; - } - spacc_write_context(spacc, h, SPACC_CRYPTO_OPERATION, - key, keysize, key, ivsize); - } - - spacc_set_key_exp(spacc, h); - - switch (x) { - case CRYPTO_MODE_ZUC_UEA3: - case CRYPTO_MODE_SNOW3G_UEA2: - case CRYPTO_MODE_MAC_SNOW3G_UIA2: - case CRYPTO_MODE_MAC_ZUC_UIA3: - case CRYPTO_MODE_KASUMI_F8: - spacc_set_auxinfo(spacc, h, 0, 0); - break; - case CRYPTO_MODE_MAC_KASUMI_F9: - spacc_set_auxinfo(spacc, h, 0, 8); - break; - } - - memset(virt, 0, 256); - - /* 16AAD/16PT or 32AAD/0PT depending on - * whether we're in a hash or not mode - */ - aadlen = 16; - proclen = 32; - if (!enc) - aadlen += 16; - - switch (x) { - case CRYPTO_MODE_SM4_CS1: - case CRYPTO_MODE_SM4_CS2: - case CRYPTO_MODE_SM4_CS3: - case CRYPTO_MODE_AES_CS1: - case CRYPTO_MODE_AES_CS2: - case CRYPTO_MODE_AES_CS3: - proclen = 31; - fallthrough; - case CRYPTO_MODE_SM4_XTS: - case CRYPTO_MODE_AES_XTS: - aadlen = 0; - } - - err = spacc_packet_enqueue_ddt(spacc, h, ddt, ddt, proclen, 0, - aadlen, 0, 0, 0); - if (err == CRYPTO_OK) { - do { - err = spacc_packet_dequeue(spacc, h); - } while (err == -EINPROGRESS); - } - if (err != CRYPTO_OK || !testdata[x].test[y].outlen || - memcmp(testdata[x].test[y].data, virt, - testdata[x].test[y].outlen)) { - spacc->config.modes[x] &= ~(1 << y); - } - spacc_close(spacc, h); - } -} - -int spacc_autodetect(struct spacc_device *spacc) -{ - struct pdu_ddt ddt; 
- dma_addr_t dma; - void *virt; - int x, y; - unsigned char key[64]; - - /* allocate DMA memory ...*/ - virt = dma_alloc_coherent(get_ddt_device(), 256, &dma, GFP_KERNEL); - if (!virt) - return -2; - - if (pdu_ddt_init(&ddt, 1)) { - dma_free_coherent(get_ddt_device(), 256, virt, dma); - return -3; - } - - pdu_ddt_add(&ddt, dma, 256); - - for (x = 0; x < 64; x++) - key[x] = x; - - for (x = 0; x < ARRAY_SIZE(template); x++) { - spacc->config.modes[x] = template[x]; - if (template[x] && spacc->config.version >= - testdata[x].min_version) { - for (y = 0; y < (ARRAY_SIZE(keysizes[0])); y++) - check_modes(spacc, x, y, virt, key, &ddt); - } - } - - pdu_ddt_free(&ddt); - dma_free_coherent(get_ddt_device(), 256, virt, dma); - - return 0; -} - -#else - -static void spacc_static_modes(struct spacc_device *spacc, int x, int y) -{ - /* Disable the algos that as not supported here */ - switch (x) { - case CRYPTO_MODE_AES_F8: - case CRYPTO_MODE_AES_CFB: - case CRYPTO_MODE_AES_OFB: - case CRYPTO_MODE_MULTI2_ECB: - case CRYPTO_MODE_MULTI2_CBC: - case CRYPTO_MODE_MULTI2_CFB: - case CRYPTO_MODE_MULTI2_OFB: - case CRYPTO_MODE_MAC_POLY1305: - case CRYPTO_MODE_HASH_CRC32: - /* Disable the modes */ - spacc->config.modes[x] &= ~(1 << y); - break; - default: - break;/* Algos are enabled */ - } -} - -int spacc_static_config(struct spacc_device *spacc) -{ - - int x, y; - - for (x = 0; x < ARRAY_SIZE(template); x++) { - spacc->config.modes[x] = template[x]; - - for (y = 0; y < (ARRAY_SIZE(keysizes[0])); y++) { - /* List static modes */ - spacc_static_modes(spacc, x, y); - } - } - - return 0; -} -#endif -int spacc_clone_handle(struct spacc_device *spacc, int old_handle, - void *cbdata) -{ - int new_handle; - - new_handle = spacc_job_request(spacc, spacc->job[old_handle].ctx_idx); - if (new_handle < 0) - return new_handle; - - spacc->job[new_handle] = spacc->job[old_handle]; - spacc->job[new_handle].job_used = new_handle; - spacc->job[new_handle].cbdata = cbdata; - - return new_handle; -} - -/* 
Allocates a job for spacc module context and initialize - * it with an appropriate type. - */ -int spacc_open(struct spacc_device *spacc, int enc, int hash, int ctxid, - int secure_mode, spacc_callback cb, void *cbdata) -{ - u32 ctrl = 0; - int job_idx = 0; - int ret = CRYPTO_OK; - struct spacc_job *job = NULL; - - job_idx = spacc_job_request(spacc, ctxid); - if (job_idx < 0) - return -EIO; - - job = &spacc->job[job_idx]; - - if (secure_mode && job->ctx_idx > spacc->config.num_sec_ctx) { - pr_debug("ERR: For secure contexts"); - pr_debug("ERR: Job ctx ID is outside allowed range\n"); - spacc_job_release(spacc, job_idx); - return -EIO; - } - - job->auxinfo_cs_mode = 0; - job->auxinfo_bit_align = 0; - job->auxinfo_dir = 0; - job->icv_len = 0; - - switch (enc) { - case CRYPTO_MODE_NULL: - break; - case CRYPTO_MODE_AES_ECB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); - break; - case CRYPTO_MODE_AES_CBC: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); - break; - - case CRYPTO_MODE_AES_CS1: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); - job->auxinfo_cs_mode = 1; - break; - case CRYPTO_MODE_AES_CS2: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); - job->auxinfo_cs_mode = 2; - break; - case CRYPTO_MODE_AES_CS3: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); - job->auxinfo_cs_mode = 3; - break; - case CRYPTO_MODE_AES_CFB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CFB); - break; - case CRYPTO_MODE_AES_OFB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_OFB); - break; - case CRYPTO_MODE_AES_CTR: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, 
C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CTR); - break; - case CRYPTO_MODE_AES_CCM: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CCM); - break; - case CRYPTO_MODE_AES_GCM: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_GCM); - break; - case CRYPTO_MODE_AES_F8: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_F8); - break; - case CRYPTO_MODE_AES_XTS: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_AES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_XTS); - break; - case CRYPTO_MODE_MULTI2_ECB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_MULTI2); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); - break; - case CRYPTO_MODE_MULTI2_CBC: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_MULTI2); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); - break; - case CRYPTO_MODE_MULTI2_OFB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_MULTI2); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_OFB); - break; - case CRYPTO_MODE_MULTI2_CFB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_MULTI2); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CFB); - break; - case CRYPTO_MODE_3DES_CBC: - case CRYPTO_MODE_DES_CBC: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_DES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); - break; - case CRYPTO_MODE_3DES_ECB: - case CRYPTO_MODE_DES_ECB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_DES); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); - break; - case CRYPTO_MODE_KASUMI_ECB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_KASUMI); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); - break; - case CRYPTO_MODE_KASUMI_F8: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_KASUMI); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_F8); - break; - case CRYPTO_MODE_SNOW3G_UEA2: - ctrl |= 
SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, - C_SNOW3G_UEA2); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); - break; - case CRYPTO_MODE_ZUC_UEA3: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, - C_ZUC_UEA3); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); - break; - case CRYPTO_MODE_CHACHA20_STREAM: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_CHACHA20); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CHACHA_STREAM); - break; - case CRYPTO_MODE_CHACHA20_POLY1305: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, - C_CHACHA20); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, - CM_CHACHA_AEAD); - break; - case CRYPTO_MODE_SM4_ECB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_ECB); - break; - case CRYPTO_MODE_SM4_CBC: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); - break; - case CRYPTO_MODE_SM4_CS1: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); - job->auxinfo_cs_mode = 1; - break; - case CRYPTO_MODE_SM4_CS2: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); - job->auxinfo_cs_mode = 2; - break; - case CRYPTO_MODE_SM4_CS3: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CBC); - job->auxinfo_cs_mode = 3; - break; - case CRYPTO_MODE_SM4_CFB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CFB); - break; - case CRYPTO_MODE_SM4_OFB: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_OFB); - break; - case CRYPTO_MODE_SM4_CTR: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CTR); - break; - case CRYPTO_MODE_SM4_CCM: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= 
SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_CCM); - break; - case CRYPTO_MODE_SM4_GCM: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_GCM); - break; - case CRYPTO_MODE_SM4_F8: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_F8); - break; - case CRYPTO_MODE_SM4_XTS: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_ALG, C_SM4); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_CIPH_MODE, CM_XTS); - break; - default: - ret = -EOPNOTSUPP; - } - - switch (hash) { - case CRYPTO_MODE_NULL: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_NULL); - break; - case CRYPTO_MODE_HMAC_SHA1: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA1); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); - break; - case CRYPTO_MODE_HMAC_MD5: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_MD5); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); - break; - case CRYPTO_MODE_HMAC_SHA224: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA224); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); - break; - case CRYPTO_MODE_HMAC_SHA256: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA256); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); - break; - case CRYPTO_MODE_HMAC_SHA384: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA384); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); - break; - case CRYPTO_MODE_HMAC_SHA512: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA512); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); - break; - case CRYPTO_MODE_HMAC_SHA512_224: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHA512_224); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); - break; - case CRYPTO_MODE_HMAC_SHA512_256: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHA512_256); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); - break; - case CRYPTO_MODE_SSLMAC_MD5: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_MD5); - ctrl |= 
SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SSLMAC); - break; - case CRYPTO_MODE_SSLMAC_SHA1: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA1); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SSLMAC); - break; - case CRYPTO_MODE_HASH_SHA1: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA1); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_MD5: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_MD5); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHA224: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA224); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHA256: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA256); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHA384: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA384); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHA512: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SHA512); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHA512_224: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHA512_224); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHA512_256: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHA512_256); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHA3_224: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHA3_224); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHA3_256: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHA3_256); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHA3_384: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHA3_384); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHA3_512: - ctrl |= 
SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHA3_512); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SHAKE128: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHAKE128); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SHAKE_SHAKE); - break; - case CRYPTO_MODE_HASH_SHAKE256: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHAKE256); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SHAKE_SHAKE); - break; - case CRYPTO_MODE_HASH_CSHAKE128: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHAKE128); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SHAKE_CSHAKE); - break; - case CRYPTO_MODE_HASH_CSHAKE256: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHAKE256); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SHAKE_CSHAKE); - break; - case CRYPTO_MODE_MAC_KMAC128: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHAKE128); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SHAKE_KMAC); - break; - case CRYPTO_MODE_MAC_KMAC256: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHAKE256); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SHAKE_KMAC); - break; - case CRYPTO_MODE_MAC_KMACXOF128: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHAKE128); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SHAKE_KMAC); - /* auxinfo_dir reused to indicate XOF */ - job->auxinfo_dir = 1; - break; - case CRYPTO_MODE_MAC_KMACXOF256: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SHAKE256); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, - HM_SHAKE_KMAC); - /* auxinfo_dir reused to indicate XOF */ - job->auxinfo_dir = 1; - break; - case CRYPTO_MODE_MAC_XCBC: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_XCBC); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_MAC_CMAC: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_CMAC); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_MAC_KASUMI_F9: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_KF9); - 
ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_MAC_SNOW3G_UIA2: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SNOW3G_UIA2); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_MAC_ZUC_UIA3: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_ZUC_UIA3); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_MAC_POLY1305: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_POLY1305); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_CRC32: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_CRC32_I3E802_3); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_MAC_MICHAEL: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_MICHAEL); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HASH_SM3: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SM3); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_HMAC_SM3: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, H_SM3); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_HMAC); - break; - case CRYPTO_MODE_MAC_SM4_XCBC: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SM4_XCBC_MAC); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - case CRYPTO_MODE_MAC_SM4_CMAC: - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_ALG, - H_SM4_CMAC); - ctrl |= SPACC_CTRL_SET(SPACC_CTRL_HASH_MODE, HM_RAW); - break; - default: - ret = -EOPNOTSUPP; - } - ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_MSG_BEGIN) | - SPACC_CTRL_MASK(SPACC_CTRL_MSG_END); - - if (ret != CRYPTO_OK) { - spacc_job_release(spacc, job_idx); - } else { - ret = job_idx; - job->first_use = 1; - job->enc_mode = enc; - job->hash_mode = hash; - job->ckey_sz = 0; - job->hkey_sz = 0; - job->job_done = 0; - job->job_swid = 0; - job->job_secure = !!secure_mode; - - job->auxinfo_bit_align = 0; - job->job_err = -EINPROGRESS; - job->ctrl = ctrl | - 
SPACC_CTRL_SET(SPACC_CTRL_CTX_IDX, - job->ctx_idx); - job->cb = cb; - job->cbdata = cbdata; - } - - return ret; -} - -static int spacc_xof_stringsize_autodetect(struct spacc_device *spacc) -{ - void *virt; - dma_addr_t dma; - struct pdu_ddt ddt; - int ss, alg, i, stat; - unsigned long spacc_ctrl[2] = {0xF400B400, 0xF400D400}; - unsigned char buf[256]; - unsigned long buflen, rbuf; - unsigned char test_str[6] = {0x01, 0x20, 0x54, 0x45, 0x53, 0x54}; - unsigned char md[2][16] = { - {0xc3, 0x6d, 0x0a, 0x88, 0xfa, 0x37, 0x4c, 0x9b, - 0x44, 0x74, 0xeb, 0x00, 0x5f, 0xe8, 0xca, 0x25}, - {0x68, 0x77, 0x04, 0x11, 0xf8, 0xe3, 0xb0, 0x1e, - 0x0d, 0xbf, 0x71, 0x6a, 0xe9, 0x87, 0x1a, 0x0d}}; - - virt = dma_alloc_coherent(get_ddt_device(), 256, &dma, GFP_KERNEL); - if (!virt) - return -EIO; - - if (pdu_ddt_init(&ddt, 1)) { - dma_free_coherent(get_ddt_device(), 256, virt, dma); - return -EIO; - } - pdu_ddt_add(&ddt, dma, 256); - - /* populate registers for jobs*/ - writel((uint32_t)ddt.phys, spacc->regmap + SPACC_REG_SRC_PTR); - writel((uint32_t)ddt.phys, spacc->regmap + SPACC_REG_DST_PTR); - - writel(16, spacc->regmap + SPACC_REG_PROC_LEN); - writel(16, spacc->regmap + SPACC_REG_PRE_AAD_LEN); - writel(16, spacc->regmap + SPACC_REG_ICV_LEN); - writel(6, spacc->regmap + SPACC_REG_KEY_SZ); - writel(0, spacc->regmap + SPACC_REG_SW_CTRL); - - /* repeat for 2 algorithms, CSHAKE128 and KMAC128*/ - for (alg = 0; (alg < 2) && (spacc->config.string_size == 0); alg++) { - /* repeat for 4 string_size sizes*/ - for (ss = 0; ss < 4; ss++) { - buflen = (32UL << ss); - if (buflen > spacc->config.hash_page_size) - break; - - /* clear I/O memory*/ - memset(virt, 0, 256); - - /* clear buf and then insert test string*/ - memset(buf, 0, sizeof(buf)); - memcpy(buf, test_str, sizeof(test_str)); - memcpy(buf + (buflen >> 1), test_str, sizeof(test_str)); - - /* write key context */ - pdu_to_dev_s(spacc->regmap + SPACC_CTX_HASH_KEY, - buf, - spacc->config.hash_page_size >> 2, - 
spacc->config.spacc_endian); - - /* write ctrl register */ - writel(spacc_ctrl[alg], spacc->regmap + SPACC_REG_CTRL); - - /* wait for job to complete */ - for (i = 0; i < 20; i++) { - rbuf = 0; - rbuf = readl(spacc->regmap + - SPACC_REG_FIFO_STAT) & - SPACC_FIFO_STAT_STAT_EMPTY; - if (!rbuf) { - /* check result, if it matches, - * we have string_size - */ - writel(1, spacc->regmap + - SPACC_REG_STAT_POP); - rbuf = 0; - rbuf = readl(spacc->regmap + - SPACC_REG_STATUS); - stat = SPACC_GET_STATUS_RET_CODE(rbuf); - if ((!memcmp(virt, md[alg], 16)) && - stat == SPACC_OK) { - spacc->config.string_size = - (16 << ss); - } - break; - } - } - } - } - - /* reset registers */ - writel(0, spacc->regmap + SPACC_REG_IRQ_CTRL); - writel(0, spacc->regmap + SPACC_REG_IRQ_EN); - writel(0xFFFFFFFF, spacc->regmap + SPACC_REG_IRQ_STAT); - - writel(0, spacc->regmap + SPACC_REG_SRC_PTR); - writel(0, spacc->regmap + SPACC_REG_DST_PTR); - writel(0, spacc->regmap + SPACC_REG_PROC_LEN); - writel(0, spacc->regmap + SPACC_REG_ICV_LEN); - writel(0, spacc->regmap + SPACC_REG_PRE_AAD_LEN); - - pdu_ddt_free(&ddt); - dma_free_coherent(get_ddt_device(), 256, virt, dma); - - return CRYPTO_OK; -} - -/* free up the memory */ -void spacc_fini(struct spacc_device *spacc) -{ - vfree(spacc->ctx); - vfree(spacc->job); -} - -int spacc_init(void __iomem *baseaddr, struct spacc_device *spacc, - struct pdu_info *info) -{ - unsigned long id; - char version_string[3][16] = { "SPACC", "SPACC-PDU" }; - char idx_string[2][16] = { "(Normal Port)", "(Secure Port)" }; - char dma_type_string[4][16] = { "Unknown", "Scattergather", "Linear", - "Unknown" }; - - if (!baseaddr) { - pr_debug("ERR: baseaddr is NULL\n"); - return -1; - } - if (!spacc) { - pr_debug("ERR: spacc is NULL\n"); - return -1; - } - - memset(spacc, 0, sizeof(*spacc)); - spin_lock_init(&spacc->lock); - spin_lock_init(&spacc->ctx_lock); - - /* assign the baseaddr*/ - spacc->regmap = baseaddr; - - /* version info*/ - spacc->config.version = 
info->spacc_version.version; - spacc->config.pdu_version = (info->pdu_config.major << 4) | - info->pdu_config.minor; - spacc->config.project = info->spacc_version.project; - spacc->config.is_pdu = info->spacc_version.is_pdu; - spacc->config.is_qos = info->spacc_version.qos; - - /* misc*/ - spacc->config.is_partial = info->spacc_version.partial; - spacc->config.num_ctx = info->spacc_config.num_ctx; - spacc->config.ciph_page_size = 1U << - info->spacc_config.ciph_ctx_page_size; - - spacc->config.hash_page_size = 1U << - info->spacc_config.hash_ctx_page_size; - - spacc->config.dma_type = info->spacc_config.dma_type; - spacc->config.idx = info->spacc_version.vspacc_idx; - spacc->config.cmd0_fifo_depth = info->spacc_config.cmd0_fifo_depth; - spacc->config.cmd1_fifo_depth = info->spacc_config.cmd1_fifo_depth; - spacc->config.cmd2_fifo_depth = info->spacc_config.cmd2_fifo_depth; - spacc->config.stat_fifo_depth = info->spacc_config.stat_fifo_depth; - spacc->config.fifo_cnt = 1; - spacc->config.is_ivimport = info->spacc_version.ivimport; - - /* ctrl register map*/ - if (spacc->config.version <= 0x4E) - spacc->config.ctrl_map = spacc_ctrl_map[SPACC_CTRL_VER_0]; - else if (spacc->config.version <= 0x60) - spacc->config.ctrl_map = spacc_ctrl_map[SPACC_CTRL_VER_1]; - else - spacc->config.ctrl_map = spacc_ctrl_map[SPACC_CTRL_VER_2]; - - spacc->job_next_swid = 0; - spacc->wdcnt = 0; - spacc->config.wd_timer = SPACC_WD_TIMER_INIT; - - /* version 4.10 uses IRQ, - * above uses WD and we don't support below 4.00 - */ - if (spacc->config.version < 0x40) { - pr_debug("ERR: Unsupported SPAcc version\n"); - return -EIO; - } else if (spacc->config.version < 0x4B) { - spacc->op_mode = SPACC_OP_MODE_IRQ; - } else { - spacc->op_mode = SPACC_OP_MODE_WD; - } - - /* set threshold and enable irq - * on 4.11 and newer cores we can derive this - * from the HW reported depths. 
- */ - if (spacc->config.stat_fifo_depth == 1) - spacc->config.ideal_stat_level = 1; - else if (spacc->config.stat_fifo_depth <= 4) - spacc->config.ideal_stat_level = - spacc->config.stat_fifo_depth - 1; - else if (spacc->config.stat_fifo_depth <= 8) - spacc->config.ideal_stat_level = - spacc->config.stat_fifo_depth - 2; - else - spacc->config.ideal_stat_level = - spacc->config.stat_fifo_depth - 4; - - /* determine max PROClen value */ - writel(0xFFFFFFFF, spacc->regmap + SPACC_REG_PROC_LEN); - spacc->config.max_msg_size = readl(spacc->regmap + SPACC_REG_PROC_LEN); - - /* read config info*/ - if (spacc->config.is_pdu) { - pr_debug("PDU:\n"); - pr_debug(" MAJOR : %u\n", info->pdu_config.major); - pr_debug(" MINOR : %u\n", info->pdu_config.minor); - } - - id = readl(spacc->regmap + SPACC_REG_ID); - pr_debug("SPACC ID: (%08lx)\n", (unsigned long)id); - pr_debug(" MAJOR : %x\n", info->spacc_version.major); - pr_debug(" MINOR : %x\n", info->spacc_version.minor); - pr_debug(" QOS : %x\n", info->spacc_version.qos); - pr_debug(" IVIMPORT : %x\n", spacc->config.is_ivimport); - - if (spacc->config.version >= 0x48) - pr_debug(" TYPE : %lx (%s)\n", SPACC_ID_TYPE(id), - version_string[SPACC_ID_TYPE(id) & 3]); - - pr_debug(" AUX : %x\n", info->spacc_version.qos); - pr_debug(" IDX : %lx %s\n", SPACC_ID_VIDX(id), - spacc->config.is_secure ? 
- (idx_string[spacc->config.is_secure_port & 1]) : ""); - pr_debug(" PARTIAL : %x\n", info->spacc_version.partial); - pr_debug(" PROJECT : %x\n", info->spacc_version.project); - - if (spacc->config.version >= 0x48) - id = readl(spacc->regmap + SPACC_REG_CONFIG); - else - id = 0xFFFFFFFF; - - pr_debug("SPACC CFG: (%08lx)\n", id); - pr_debug(" CTX CNT : %u\n", info->spacc_config.num_ctx); - pr_debug(" VSPACC CNT : %u\n", info->spacc_config.num_vspacc); - pr_debug(" CIPH SZ : %-3lu bytes\n", 1UL << - info->spacc_config.ciph_ctx_page_size); - pr_debug(" HASH SZ : %-3lu bytes\n", 1UL << - info->spacc_config.hash_ctx_page_size); - pr_debug(" DMA TYPE : %u (%s)\n", info->spacc_config.dma_type, - dma_type_string[info->spacc_config.dma_type & 3]); - pr_debug(" MAX PROCLEN: %lu bytes\n", (unsigned long) - spacc->config.max_msg_size); - pr_debug(" FIFO CONFIG :\n"); - pr_debug(" CMD0 DEPTH: %d\n", spacc->config.cmd0_fifo_depth); - - if (spacc->config.is_qos) { - pr_debug(" CMD1 DEPTH: %d\n", - spacc->config.cmd1_fifo_depth); - pr_debug(" CMD2 DEPTH: %d\n", - spacc->config.cmd2_fifo_depth); - } - pr_debug(" STAT DEPTH: %d\n", spacc->config.stat_fifo_depth); - - if (spacc->config.dma_type == SPACC_DMA_DDT) { - writel(0x1234567F, baseaddr + SPACC_REG_DST_PTR); - writel(0xDEADBEEF, baseaddr + SPACC_REG_SRC_PTR); - - if (((readl(baseaddr + SPACC_REG_DST_PTR)) != - (0x1234567F & SPACC_DST_PTR_PTR)) || - ((readl(baseaddr + SPACC_REG_SRC_PTR)) != - (0xDEADBEEF & SPACC_SRC_PTR_PTR))) { - pr_debug("ERR: Failed to set pointers\n"); - goto ERR; - } - } - - /* zero the IRQ CTRL/EN register - * (to make sure we're in a sane state) - */ - writel(0, spacc->regmap + SPACC_REG_IRQ_CTRL); - writel(0, spacc->regmap + SPACC_REG_IRQ_EN); - writel(0xFFFFFFFF, spacc->regmap + SPACC_REG_IRQ_STAT); - - /* init cache*/ - memset(&spacc->cache, 0, sizeof(spacc->cache)); - writel(0, spacc->regmap + SPACC_REG_SRC_PTR); - writel(0, spacc->regmap + SPACC_REG_DST_PTR); - writel(0, spacc->regmap + 
SPACC_REG_PROC_LEN); - writel(0, spacc->regmap + SPACC_REG_ICV_LEN); - writel(0, spacc->regmap + SPACC_REG_ICV_OFFSET); - writel(0, spacc->regmap + SPACC_REG_PRE_AAD_LEN); - writel(0, spacc->regmap + SPACC_REG_POST_AAD_LEN); - writel(0, spacc->regmap + SPACC_REG_IV_OFFSET); - writel(0, spacc->regmap + SPACC_REG_OFFSET); - writel(0, spacc->regmap + SPACC_REG_AUX_INFO); - - spacc->ctx = vmalloc(sizeof(struct spacc_ctx) * spacc->config.num_ctx); - if (!spacc->ctx) - goto ERR; - - spacc->job = vmalloc(sizeof(struct spacc_job) * SPACC_MAX_JOBS); - if (!spacc->job) - goto ERR; - - /* initialize job_idx and lookup table */ - spacc_job_init_all(spacc); - - /* initialize contexts */ - spacc_ctx_init_all(spacc); - - /* autodetect and set string size setting*/ - if (spacc->config.version == 0x61 || spacc->config.version >= 0x65) - spacc_xof_stringsize_autodetect(spacc); - - return CRYPTO_OK; -ERR: - spacc_fini(spacc); - pr_debug("ERR: Crypto Failed\n"); - - return -EIO; -} - -/* callback function to initialize tasklet running */ -void spacc_pop_jobs(unsigned long data) -{ - int num = 0; - struct spacc_priv *priv = (struct spacc_priv *)data; - struct spacc_device *spacc = &priv->spacc; - - /* decrement the WD CNT here since - * now we're actually going to respond - * to the IRQ completely - */ - if (spacc->wdcnt) - --(spacc->wdcnt); - - spacc_pop_packets(spacc, &num); -} - -int spacc_remove(struct platform_device *pdev) -{ - struct spacc_device *spacc; - struct spacc_priv *priv = platform_get_drvdata(pdev); - - /* free test vector memory*/ - spacc = &priv->spacc; - spacc_fini(spacc); - - tasklet_kill(&priv->pop_jobs); - - /* devm functions do proper cleanup */ - pdu_mem_deinit(&pdev->dev); - dev_dbg(&pdev->dev, "removed!\n"); - - return 0; -} - -int spacc_set_key_exp(struct spacc_device *spacc, int job_idx) -{ - struct spacc_ctx *ctx = NULL; - struct spacc_job *job = NULL; - - if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) { - pr_debug("ERR: Invalid Job id specified (out of 
range)\n"); - return -ENXIO; - } - - job = &spacc->job[job_idx]; - ctx = context_lookup_by_job(spacc, job_idx); - - if (!ctx) { - pr_debug("ERR: Failed to find ctx id\n"); - return -EIO; - } - - job->ctrl |= SPACC_CTRL_MASK(SPACC_CTRL_KEY_EXP); - - return CRYPTO_OK; -} - -int spacc_compute_xcbc_key(struct spacc_device *spacc, int mode_id, - int job_idx, const unsigned char *key, - int keylen, unsigned char *xcbc_out) -{ - unsigned char *buf; - dma_addr_t bufphys; - struct pdu_ddt ddt; - unsigned char iv[16]; - int err, i, handle, usecbc, ctx_idx; - - if (job_idx >= 0 && job_idx < SPACC_MAX_JOBS) - ctx_idx = spacc->job[job_idx].ctx_idx; - else - ctx_idx = -1; - - if (mode_id == CRYPTO_MODE_MAC_XCBC) { - /* figure out if we can schedule the key */ - if (spacc_isenabled(spacc, CRYPTO_MODE_AES_ECB, 16)) - usecbc = 0; - else if (spacc_isenabled(spacc, CRYPTO_MODE_AES_CBC, 16)) - usecbc = 1; - else - return -1; - } else if (mode_id == CRYPTO_MODE_MAC_SM4_XCBC) { - /* figure out if we can schedule the key */ - if (spacc_isenabled(spacc, CRYPTO_MODE_SM4_ECB, 16)) - usecbc = 0; - else if (spacc_isenabled(spacc, CRYPTO_MODE_SM4_CBC, 16)) - usecbc = 1; - else - return -1; - } else { - return -1; - } - - memset(iv, 0, sizeof(iv)); - memset(&ddt, 0, sizeof(ddt)); - - buf = dma_alloc_coherent(get_ddt_device(), 64, &bufphys, GFP_KERNEL); - if (!buf) - return -EINVAL; - - handle = -1; - - /* set to 1111...., 2222...., 333... */ - for (i = 0; i < 48; i++) - buf[i] = (i >> 4) + 1; - - /* build DDT */ - err = pdu_ddt_init(&ddt, 1); - if (err) - goto xcbc_err; - - pdu_ddt_add(&ddt, bufphys, 48); - - /* open a handle in either CBC or ECB mode */ - if (mode_id == CRYPTO_MODE_MAC_XCBC) { - handle = spacc_open(spacc, (usecbc ? - CRYPTO_MODE_AES_CBC : CRYPTO_MODE_AES_ECB), - CRYPTO_MODE_NULL, ctx_idx, 0, NULL, NULL); - if (handle < 0) { - err = handle; - goto xcbc_err; - } - } else if (mode_id == CRYPTO_MODE_MAC_SM4_XCBC) { - handle = spacc_open(spacc, (usecbc ? 
- CRYPTO_MODE_SM4_CBC : CRYPTO_MODE_SM4_ECB), - CRYPTO_MODE_NULL, ctx_idx, 0, NULL, NULL); - if (handle < 0) { - err = handle; - goto xcbc_err; - } - } - spacc_set_operation(spacc, handle, OP_ENCRYPT, 0, 0, 0, 0, 0); - - if (usecbc) { - /* we can do the ECB work in CBC using three - * jobs with the IVreset to zero each time - */ - for (i = 0; i < 3; i++) { - spacc_write_context(spacc, handle, - SPACC_CRYPTO_OPERATION, key, - keylen, iv, 16); - err = spacc_packet_enqueue_ddt(spacc, handle, &ddt, - &ddt, 16, (i * 16) | - ((i * 16) << 16), 0, 0, 0, 0); - if (err != CRYPTO_OK) - goto xcbc_err; - - do { - err = spacc_packet_dequeue(spacc, handle); - } while (err == -EINPROGRESS); - if (err != CRYPTO_OK) - goto xcbc_err; - } - } else { - /* do the 48 bytes as a single SPAcc job this is the ideal case - * but only possible if ECB was enabled in the core - */ - spacc_write_context(spacc, handle, SPACC_CRYPTO_OPERATION, - key, keylen, iv, 16); - err = spacc_packet_enqueue_ddt(spacc, handle, &ddt, &ddt, 48, - 0, 0, 0, 0, 0); - if (err != CRYPTO_OK) - goto xcbc_err; - - do { - err = spacc_packet_dequeue(spacc, handle); - } while (err == -EINPROGRESS); - if (err != CRYPTO_OK) - goto xcbc_err; - } - - /* now we can copy the key*/ - memcpy(xcbc_out, buf, 48); - memset(buf, 0, 64); - -xcbc_err: - dma_free_coherent(get_ddt_device(), 64, buf, bufphys); - pdu_ddt_free(&ddt); - if (handle >= 0) - spacc_close(spacc, handle); - - if (err) - return -EINVAL; - - return 0; -} diff --git a/drivers/crypto/dwc-spacc/spacc_core.h b/drivers/crypto/dwc-spacc/spacc_core.h deleted file mode 100644 index 297a08eea0d2..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_core.h +++ /dev/null @@ -1,824 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - - -#ifndef SPACC_CORE_H_ -#define SPACC_CORE_H_ - -#include -#include -#include -#include -#include -#include "spacc_hal.h" - -enum { - SPACC_DMA_UNDEF = 0, - SPACC_DMA_DDT = 1, - SPACC_DMA_LINEAR = 2 -}; - -enum { - SPACC_OP_MODE_IRQ = 0, - 
SPACC_OP_MODE_WD = 1 /* watchdog */ -}; - -#define OP_ENCRYPT 0 -#define OP_DECRYPT 1 - -#define SPACC_CRYPTO_OPERATION 1 -#define SPACC_HASH_OPERATION 2 - -#define SPACC_AADCOPY_FLAG 0x80000000 - -#define SPACC_AUTO_SIZE (-1) - -#define SPACC_WD_LIMIT 0x80 -#define SPACC_WD_TIMER_INIT 0x40000 - -/********* Register Offsets **********/ -#define SPACC_REG_IRQ_EN 0x00000L -#define SPACC_REG_IRQ_STAT 0x00004L -#define SPACC_REG_IRQ_CTRL 0x00008L -#define SPACC_REG_FIFO_STAT 0x0000CL -#define SPACC_REG_SDMA_BRST_SZ 0x00010L - -#define SPACC_REG_SRC_PTR 0x00020L -#define SPACC_REG_DST_PTR 0x00024L -#define SPACC_REG_OFFSET 0x00028L -#define SPACC_REG_PRE_AAD_LEN 0x0002CL -#define SPACC_REG_POST_AAD_LEN 0x00030L - -#define SPACC_REG_PROC_LEN 0x00034L -#define SPACC_REG_ICV_LEN 0x00038L -#define SPACC_REG_ICV_OFFSET 0x0003CL -#define SPACC_REG_IV_OFFSET 0x00040L - -#define SPACC_REG_SW_CTRL 0x00044L -#define SPACC_REG_AUX_INFO 0x00048L -#define SPACC_REG_CTRL 0x0004CL - -#define SPACC_REG_STAT_POP 0x00050L -#define SPACC_REG_STATUS 0x00054L - -#define SPACC_REG_STAT_WD_CTRL 0x00080L - -#define SPACC_REG_KEY_SZ 0x00100L - -#define SPACC_REG_VIRTUAL_RQST 0x00140L -#define SPACC_REG_VIRTUAL_ALLOC 0x00144L -#define SPACC_REG_VIRTUAL_PRIO 0x00148L - -#define SPACC_REG_ID 0x00180L -#define SPACC_REG_CONFIG 0x00184L -#define SPACC_REG_CONFIG2 0x00190L - -#define SPACC_REG_SECURE_CTRL 0x001C0L -#define SPACC_REG_SECURE_RELEASE 0x001C4 - -#define SPACC_REG_SK_LOAD 0x00200L -#define SPACC_REG_SK_STAT 0x00204L -#define SPACC_REG_SK_KEY 0x00240L - -#define SPACC_REG_VERSION_EXT_3 0x00194L - -/* out 8MB from base of SPACC */ -#define SPACC_REG_SKP 0x800000UL - -/********** Context Offsets **********/ -#define SPACC_CTX_CIPH_KEY 0x04000L -#define SPACC_CTX_HASH_KEY 0x08000L - -/******** Sub-Context Offsets ********/ -#define SPACC_CTX_AES_KEY 0x00 -#define SPACC_CTX_AES_IV 0x20 - -#define SPACC_CTX_DES_KEY 0x08 -#define SPACC_CTX_DES_IV 0x00 - -/* use these to loop over CMDX macros */ 
-#define SPACC_CMDX_MAX 1 -#define SPACC_CMDX_MAX_QOS 3 -/********** IRQ_EN Bit Masks **********/ - -#define _SPACC_IRQ_CMD0 0 -#define _SPACC_IRQ_STAT 4 -#define _SPACC_IRQ_STAT_WD 12 -#define _SPACC_IRQ_GLBL 31 - -#define SPACC_IRQ_EN_CMD(x) (1UL << _SPACC_IRQ_CMD0 << (x)) -#define SPACC_IRQ_EN_STAT BIT(_SPACC_IRQ_STAT) -#define SPACC_IRQ_EN_STAT_WD BIT(_SPACC_IRQ_STAT_WD) -#define SPACC_IRQ_EN_GLBL BIT(_SPACC_IRQ_GLBL) - -/********* IRQ_STAT Bitmasks *********/ - -#define SPACC_IRQ_STAT_CMDX(x) (1UL << _SPACC_IRQ_CMD0 << (x)) -#define SPACC_IRQ_STAT_STAT BIT(_SPACC_IRQ_STAT) -#define SPACC_IRQ_STAT_STAT_WD BIT(_SPACC_IRQ_STAT_WD) - -#define SPACC_IRQ_STAT_CLEAR_STAT(spacc) writel(SPACC_IRQ_STAT_STAT, \ - (spacc)->regmap + SPACC_REG_IRQ_STAT) - -#define SPACC_IRQ_STAT_CLEAR_STAT_WD(spacc) writel(SPACC_IRQ_STAT_STAT_WD, \ - (spacc)->regmap + SPACC_REG_IRQ_STAT) - -#define SPACC_IRQ_STAT_CLEAR_CMDX(spacc, x) writel(SPACC_IRQ_STAT_CMDX(x), \ - (spacc)->regmap + SPACC_REG_IRQ_STAT) - -/********* IRQ_CTRL Bitmasks *********/ -/* CMD0 = 0; for QOS, CMD1 = 8, CMD2 = 16 */ -#define _SPACC_IRQ_CTRL_CMDX_CNT(x) (8 * (x)) -#define SPACC_IRQ_CTRL_CMDX_CNT_SET(x, n) \ - (((n) & 0xFF) << _SPACC_IRQ_CTRL_CMDX_CNT(x)) -#define SPACC_IRQ_CTRL_CMDX_CNT_MASK(x) \ - (0xFF << _SPACC_IRQ_CTRL_CMDX_CNT(x)) - -/* STAT_CNT is at 16 and for QOS at 24 */ -#define _SPACC_IRQ_CTRL_STAT_CNT 16 -#define SPACC_IRQ_CTRL_STAT_CNT_SET(n) ((n) << _SPACC_IRQ_CTRL_STAT_CNT) -#define SPACC_IRQ_CTRL_STAT_CNT_MASK (0x1FF << _SPACC_IRQ_CTRL_STAT_CNT) - -#define _SPACC_IRQ_CTRL_STAT_CNT_QOS 24 -#define SPACC_IRQ_CTRL_STAT_CNT_SET_QOS(n) \ - ((n) << _SPACC_IRQ_CTRL_STAT_CNT_QOS) -#define SPACC_IRQ_CTRL_STAT_CNT_MASK_QOS \ - (0x7F << _SPACC_IRQ_CTRL_STAT_CNT_QOS) - -/******** FIFO_STAT Bitmasks *********/ - -/* SPACC with QOS */ -#define SPACC_FIFO_STAT_CMDX_CNT_MASK(x) \ - (0x7F << ((x) * 8)) -#define SPACC_FIFO_STAT_CMDX_CNT_GET(x, y) \ - (((y) & SPACC_FIFO_STAT_CMDX_CNT_MASK(x)) >> ((x) * 8)) -#define 
SPACC_FIFO_STAT_CMDX_FULL(x) (1UL << (7 + (x) * 8)) - -#define _SPACC_FIFO_STAT_STAT_CNT_QOS 24 -#define SPACC_FIFO_STAT_STAT_CNT_MASK_QOS \ - (0x7F << _SPACC_FIFO_STAT_STAT_CNT_QOS) -#define SPACC_FIFO_STAT_STAT_CNT_GET_QOS(y) \ - (((y) & \ - SPACC_FIFO_STAT_STAT_CNT_MASK_QOS) >> _SPACC_FIFO_STAT_STAT_CNT_QOS) - -/* SPACC without QOS */ -#define SPACC_FIFO_STAT_CMD0_CNT_MASK (0x1FF) -#define SPACC_FIFO_STAT_CMD0_CNT_GET(y) ((y) & SPACC_FIFO_STAT_CMD0_CNT_MASK) -#define _SPACC_FIFO_STAT_CMD0_FULL 15 -#define SPACC_FIFO_STAT_CMD0_FULL BIT(_SPACC_FIFO_STAT_CMD0_FULL) - -#define _SPACC_FIFO_STAT_STAT_CNT 16 -#define SPACC_FIFO_STAT_STAT_CNT_MASK (0x1FF << _SPACC_FIFO_STAT_STAT_CNT) -#define SPACC_FIFO_STAT_STAT_CNT_GET(y) \ - (((y) & SPACC_FIFO_STAT_STAT_CNT_MASK) >> _SPACC_FIFO_STAT_STAT_CNT) - -/* both */ -#define _SPACC_FIFO_STAT_STAT_EMPTY 31 -#define SPACC_FIFO_STAT_STAT_EMPTY BIT(_SPACC_FIFO_STAT_STAT_EMPTY) - -/********* SRC/DST_PTR Bitmasks **********/ - -#define SPACC_SRC_PTR_PTR 0xFFFFFFF8 -#define SPACC_DST_PTR_PTR 0xFFFFFFF8 - -/********** OFFSET Bitmasks **********/ - -#define SPACC_OFFSET_SRC_O 0 -#define SPACC_OFFSET_SRC_W 16 -#define SPACC_OFFSET_DST_O 16 -#define SPACC_OFFSET_DST_W 16 - -#define SPACC_MIN_CHUNK_SIZE 1024 -#define SPACC_MAX_CHUNK_SIZE 16384 - -/********* PKT_LEN Bitmasks **********/ - -#ifndef _SPACC_PKT_LEN_PROC_LEN -#define _SPACC_PKT_LEN_PROC_LEN 0 -#endif -#ifndef _SPACC_PKT_LEN_AAD_LEN -#define _SPACC_PKT_LEN_AAD_LEN 16 -#endif - -/********* SW_CTRL Bitmasks ***********/ - -#define _SPACC_SW_CTRL_ID_0 0 -#define SPACC_SW_CTRL_ID_W 8 -#define SPACC_SW_CTRL_ID_MASK (0xFF << _SPACC_SW_CTRL_ID_0) -#define SPACC_SW_CTRL_ID_GET(y) \ - (((y) & SPACC_SW_CTRL_ID_MASK) >> _SPACC_SW_CTRL_ID_0) -#define SPACC_SW_CTRL_ID_SET(id) \ - (((id) & SPACC_SW_CTRL_ID_MASK) >> _SPACC_SW_CTRL_ID_0) - -#define _SPACC_SW_CTRL_PRIO 30 -#define SPACC_SW_CTRL_PRIO_MASK 0x3 -#define SPACC_SW_CTRL_PRIO_SET(prio) \ - (((prio) & SPACC_SW_CTRL_PRIO_MASK) << 
_SPACC_SW_CTRL_PRIO) - -/* Priorities */ -#define SPACC_SW_CTRL_PRIO_HI 0 -#define SPACC_SW_CTRL_PRIO_MED 1 -#define SPACC_SW_CTRL_PRIO_LOW 2 - -/*********** SECURE_CTRL bitmasks *********/ -#define _SPACC_SECURE_CTRL_MS_SRC 0 -#define _SPACC_SECURE_CTRL_MS_DST 1 -#define _SPACC_SECURE_CTRL_MS_DDT 2 -#define _SPACC_SECURE_CTRL_LOCK 31 - -#define SPACC_SECURE_CTRL_MS_SRC BIT(_SPACC_SECURE_CTRL_MS_SRC) -#define SPACC_SECURE_CTRL_MS_DST BIT(_SPACC_SECURE_CTRL_MS_DST) -#define SPACC_SECURE_CTRL_MS_DDT BIT(_SPACC_SECURE_CTRL_MS_DDT) -#define SPACC_SECURE_CTRL_LOCK BIT(_SPACC_SECURE_CTRL_LOCK) - -/********* SKP bits **************/ -#define _SPACC_SK_LOAD_CTX_IDX 0 -#define _SPACC_SK_LOAD_ALG 8 -#define _SPACC_SK_LOAD_MODE 12 -#define _SPACC_SK_LOAD_SIZE 16 -#define _SPACC_SK_LOAD_ENC_EN 30 -#define _SPACC_SK_LOAD_DEC_EN 31 -#define _SPACC_SK_STAT_BUSY 0 - -#define SPACC_SK_LOAD_ENC_EN BIT(_SPACC_SK_LOAD_ENC_EN) -#define SPACC_SK_LOAD_DEC_EN BIT(_SPACC_SK_LOAD_DEC_EN) -#define SPACC_SK_STAT_BUSY BIT(_SPACC_SK_STAT_BUSY) - -/*********** CTRL Bitmasks ***********/ -/* These CTRL field locations vary with SPACC version - * and if they are used, they should be set accordingly - */ -#define _SPACC_CTRL_CIPH_ALG 0 -#define _SPACC_CTRL_HASH_ALG 4 -#define _SPACC_CTRL_CIPH_MODE 8 -#define _SPACC_CTRL_HASH_MODE 12 -#define _SPACC_CTRL_MSG_BEGIN 14 -#define _SPACC_CTRL_MSG_END 15 -#define _SPACC_CTRL_CTX_IDX 16 -#define _SPACC_CTRL_ENCRYPT 24 -#define _SPACC_CTRL_AAD_COPY 25 -#define _SPACC_CTRL_ICV_PT 26 -#define _SPACC_CTRL_ICV_ENC 27 -#define _SPACC_CTRL_ICV_APPEND 28 -#define _SPACC_CTRL_KEY_EXP 29 -#define _SPACC_CTRL_SEC_KEY 31 - -/* CTRL bitmasks for 4.15+ cores */ -#define _SPACC_CTRL_CIPH_ALG_415 0 -#define _SPACC_CTRL_HASH_ALG_415 3 -#define _SPACC_CTRL_CIPH_MODE_415 8 -#define _SPACC_CTRL_HASH_MODE_415 12 - -/********* Virtual Spacc Priority Bitmasks **********/ -#define _SPACC_VPRIO_MODE 0 -#define _SPACC_VPRIO_WEIGHT 8 - -/********* AUX INFO Bitmasks *********/ 
-#define _SPACC_AUX_INFO_DIR 0 -#define _SPACC_AUX_INFO_BIT_ALIGN 1 -#define _SPACC_AUX_INFO_CBC_CS 16 - -/********* STAT_POP Bitmasks *********/ -#define _SPACC_STAT_POP_POP 0 -#define SPACC_STAT_POP_POP BIT(_SPACC_STAT_POP_POP) - -/********** STATUS Bitmasks **********/ -#define _SPACC_STATUS_SW_ID 0 -#define _SPACC_STATUS_RET_CODE 24 -#define _SPACC_STATUS_SEC_CMD 31 -#define SPACC_GET_STATUS_RET_CODE(s) \ - (((s) >> _SPACC_STATUS_RET_CODE) & 0x7) - -#define SPACC_STATUS_SW_ID_MASK (0xFF << _SPACC_STATUS_SW_ID) -#define SPACC_STATUS_SW_ID_GET(y) \ - (((y) & SPACC_STATUS_SW_ID_MASK) >> _SPACC_STATUS_SW_ID) - -/********** KEY_SZ Bitmasks **********/ -#define _SPACC_KEY_SZ_SIZE 0 -#define _SPACC_KEY_SZ_CTX_IDX 8 -#define _SPACC_KEY_SZ_CIPHER 31 - -#define SPACC_KEY_SZ_CIPHER BIT(_SPACC_KEY_SZ_CIPHER) - -#define SPACC_SET_CIPHER_KEY_SZ(z) \ - (((z) << _SPACC_KEY_SZ_SIZE) | (1UL << _SPACC_KEY_SZ_CIPHER)) -#define SPACC_SET_HASH_KEY_SZ(z) ((z) << _SPACC_KEY_SZ_SIZE) -#define SPACC_SET_KEY_CTX(ctx) ((ctx) << _SPACC_KEY_SZ_CTX_IDX) - -/*****************************************************************************/ - -#define AUX_DIR(a) ((a) << _SPACC_AUX_INFO_DIR) -#define AUX_BIT_ALIGN(a) ((a) << _SPACC_AUX_INFO_BIT_ALIGN) -#define AUX_CBC_CS(a) ((a) << _SPACC_AUX_INFO_CBC_CS) - -#define VPRIO_SET(mode, weight) \ - (((mode) << _SPACC_VPRIO_MODE) | ((weight) << _SPACC_VPRIO_WEIGHT)) - -#ifndef MAX_DDT_ENTRIES -/* add one for null at end of list */ -#define MAX_DDT_ENTRIES \ - ((SPACC_MAX_MSG_MALLOC_SIZE / SPACC_MAX_PARTICLE_SIZE) + 1) -#endif - -#define DDT_ENTRY_SIZE (sizeof(ddt_entry) * MAX_DDT_ENTRIES) - -#ifndef SPACC_MAX_JOBS -#define SPACC_MAX_JOBS BIT(SPACC_SW_CTRL_ID_W) -#endif - -#if SPACC_MAX_JOBS > 256 -# error SPACC_MAX_JOBS cannot exceed 256. 
-#endif - -#ifndef SPACC_MAX_JOB_BUFFERS -#define SPACC_MAX_JOB_BUFFERS 192 -#endif - -/* max DDT particle size */ -#ifndef SPACC_MAX_PARTICLE_SIZE -#define SPACC_MAX_PARTICLE_SIZE 4096 -#endif - -/* max message size from HW configuration */ -/* usually defined in ICD as (2 exponent 16) -1 */ -#ifndef _SPACC_MAX_MSG_MALLOC_SIZE -#define _SPACC_MAX_MSG_MALLOC_SIZE 16 -#endif -#define SPACC_MAX_MSG_MALLOC_SIZE BIT(_SPACC_MAX_MSG_MALLOC_SIZE) - -#ifndef SPACC_MAX_MSG_SIZE -#define SPACC_MAX_MSG_SIZE (SPACC_MAX_MSG_MALLOC_SIZE - 1) -#endif - -#define SPACC_LOOP_WAIT 1000000 -#define SPACC_CTR_IV_MAX8 ((u32)0xFF) -#define SPACC_CTR_IV_MAX16 ((u32)0xFFFF) -#define SPACC_CTR_IV_MAX32 ((u32)0xFFFFFFFF) -#define SPACC_CTR_IV_MAX64 ((u64)0xFFFFFFFFFFFFFFFF) - -/* cipher algos */ -enum ecipher { - C_NULL = 0, - C_DES = 1, - C_AES = 2, - C_RC4 = 3, - C_MULTI2 = 4, - C_KASUMI = 5, - C_SNOW3G_UEA2 = 6, - C_ZUC_UEA3 = 7, - C_CHACHA20 = 8, - C_SM4 = 9, - C_MAX = 10 -}; - -/* ctrl reg cipher modes */ -enum eciphermode { - CM_ECB = 0, - CM_CBC = 1, - CM_CTR = 2, - CM_CCM = 3, - CM_GCM = 5, - CM_OFB = 7, - CM_CFB = 8, - CM_F8 = 9, - CM_XTS = 10, - CM_MAX = 11 -}; - -enum echachaciphermode { - CM_CHACHA_STREAM = 2, - CM_CHACHA_AEAD = 5 -}; - -enum ehash { - H_NULL = 0, - H_MD5 = 1, - H_SHA1 = 2, - H_SHA224 = 3, - H_SHA256 = 4, - H_SHA384 = 5, - H_SHA512 = 6, - H_XCBC = 7, - H_CMAC = 8, - H_KF9 = 9, - H_SNOW3G_UIA2 = 10, - H_CRC32_I3E802_3 = 11, - H_ZUC_UIA3 = 12, - H_SHA512_224 = 13, - H_SHA512_256 = 14, - H_MICHAEL = 15, - H_SHA3_224 = 16, - H_SHA3_256 = 17, - H_SHA3_384 = 18, - H_SHA3_512 = 19, - H_SHAKE128 = 20, - H_SHAKE256 = 21, - H_POLY1305 = 22, - H_SM3 = 23, - H_SM4_XCBC_MAC = 24, - H_SM4_CMAC = 25, - H_MAX = 26 -}; - -enum ehashmode { - HM_RAW = 0, - HM_SSLMAC = 1, - HM_HMAC = 2, - HM_MAX = 3 -}; - -enum eshakehashmode { - HM_SHAKE_SHAKE = 0, - HM_SHAKE_CSHAKE = 1, - HM_SHAKE_KMAC = 2 -}; - -enum spacc_ret_code { - SPACC_OK = 0, - SPACC_ICVFAIL = 1, - SPACC_MEMERR = 2, - 
SPACC_BLOCKERR = 3, - SPACC_SECERR = 4 -}; - -enum eicvpos { - IP_ICV_OFFSET = 0, - IP_ICV_APPEND = 1, - IP_ICV_IGNORE = 2, - IP_MAX = 3 -}; - -enum { - /* HASH of plaintext */ - ICV_HASH = 0, - /* HASH the plaintext and encrypt the plaintext and ICV */ - ICV_HASH_ENCRYPT = 1, - /* HASH the ciphertext */ - ICV_ENCRYPT_HASH = 2, - ICV_IGNORE = 3, - IM_MAX = 4 -}; - -enum { - NO_PARTIAL_PCK = 0, - FIRST_PARTIAL_PCK = 1, - MIDDLE_PARTIAL_PCK = 2, - LAST_PARTIAL_PCK = 3 -}; - -enum crypto_modes { - CRYPTO_MODE_NULL, - CRYPTO_MODE_AES_ECB, - CRYPTO_MODE_AES_CBC, - CRYPTO_MODE_AES_CTR, - CRYPTO_MODE_AES_CCM, - CRYPTO_MODE_AES_GCM, - CRYPTO_MODE_AES_F8, - CRYPTO_MODE_AES_XTS, - CRYPTO_MODE_AES_CFB, - CRYPTO_MODE_AES_OFB, - CRYPTO_MODE_AES_CS1, - CRYPTO_MODE_AES_CS2, - CRYPTO_MODE_AES_CS3, - CRYPTO_MODE_MULTI2_ECB, - CRYPTO_MODE_MULTI2_CBC, - CRYPTO_MODE_MULTI2_OFB, - CRYPTO_MODE_MULTI2_CFB, - CRYPTO_MODE_3DES_CBC, - CRYPTO_MODE_3DES_ECB, - CRYPTO_MODE_DES_CBC, - CRYPTO_MODE_DES_ECB, - CRYPTO_MODE_KASUMI_ECB, - CRYPTO_MODE_KASUMI_F8, - CRYPTO_MODE_SNOW3G_UEA2, - CRYPTO_MODE_ZUC_UEA3, - CRYPTO_MODE_CHACHA20_STREAM, - CRYPTO_MODE_CHACHA20_POLY1305, - CRYPTO_MODE_SM4_ECB, - CRYPTO_MODE_SM4_CBC, - CRYPTO_MODE_SM4_CFB, - CRYPTO_MODE_SM4_OFB, - CRYPTO_MODE_SM4_CTR, - CRYPTO_MODE_SM4_CCM, - CRYPTO_MODE_SM4_GCM, - CRYPTO_MODE_SM4_F8, - CRYPTO_MODE_SM4_XTS, - CRYPTO_MODE_SM4_CS1, - CRYPTO_MODE_SM4_CS2, - CRYPTO_MODE_SM4_CS3, - - CRYPTO_MODE_HASH_MD5, - CRYPTO_MODE_HMAC_MD5, - CRYPTO_MODE_HASH_SHA1, - CRYPTO_MODE_HMAC_SHA1, - CRYPTO_MODE_HASH_SHA224, - CRYPTO_MODE_HMAC_SHA224, - CRYPTO_MODE_HASH_SHA256, - CRYPTO_MODE_HMAC_SHA256, - CRYPTO_MODE_HASH_SHA384, - CRYPTO_MODE_HMAC_SHA384, - CRYPTO_MODE_HASH_SHA512, - CRYPTO_MODE_HMAC_SHA512, - CRYPTO_MODE_HASH_SHA512_224, - CRYPTO_MODE_HMAC_SHA512_224, - CRYPTO_MODE_HASH_SHA512_256, - CRYPTO_MODE_HMAC_SHA512_256, - - CRYPTO_MODE_MAC_XCBC, - CRYPTO_MODE_MAC_CMAC, - CRYPTO_MODE_MAC_KASUMI_F9, - CRYPTO_MODE_MAC_SNOW3G_UIA2, - 
CRYPTO_MODE_MAC_ZUC_UIA3, - CRYPTO_MODE_MAC_POLY1305, - - CRYPTO_MODE_SSLMAC_MD5, - CRYPTO_MODE_SSLMAC_SHA1, - CRYPTO_MODE_HASH_CRC32, - CRYPTO_MODE_MAC_MICHAEL, - - CRYPTO_MODE_HASH_SHA3_224, - CRYPTO_MODE_HASH_SHA3_256, - CRYPTO_MODE_HASH_SHA3_384, - CRYPTO_MODE_HASH_SHA3_512, - - CRYPTO_MODE_HASH_SHAKE128, - CRYPTO_MODE_HASH_SHAKE256, - CRYPTO_MODE_HASH_CSHAKE128, - CRYPTO_MODE_HASH_CSHAKE256, - CRYPTO_MODE_MAC_KMAC128, - CRYPTO_MODE_MAC_KMAC256, - CRYPTO_MODE_MAC_KMACXOF128, - CRYPTO_MODE_MAC_KMACXOF256, - - CRYPTO_MODE_HASH_SM3, - CRYPTO_MODE_HMAC_SM3, - CRYPTO_MODE_MAC_SM4_XCBC, - CRYPTO_MODE_MAC_SM4_CMAC, - - CRYPTO_MODE_LAST -}; - -/* job descriptor */ -typedef void (*spacc_callback)(void *spacc_dev, void *data); - -struct spacc_job { - unsigned long - enc_mode, /* Encryption Algorithm mode */ - hash_mode, /* HASH Algorithm mode */ - icv_len, - icv_offset, - op, /* Operation */ - ctrl, /* CTRL shadow register */ - - /* context just initialized or taken, - * and this is the first use. 
- */ - first_use, - pre_aad_sz, post_aad_sz, /* size of AAD for the latest packet*/ - hkey_sz, - ckey_sz; - - /* Direction and bit alignment parameters for the AUX_INFO reg */ - unsigned int auxinfo_dir, auxinfo_bit_align; - unsigned int auxinfo_cs_mode; /* AUX info setting for CBC-CS */ - - u32 ctx_idx; - unsigned int job_used, job_swid, job_done, job_err, job_secure; - spacc_callback cb; - void *cbdata; - -}; - -#define SPACC_CTX_IDX_UNUSED 0xFFFFFFFF -#define SPACC_JOB_IDX_UNUSED 0xFFFFFFFF - -struct spacc_ctx { - /* Memory context to store cipher keys*/ - void __iomem *ciph_key; - /* Memory context to store hash keys*/ - void __iomem *hash_key; - /* reference count of jobs using this context */ - int ref_cnt; - /* number of contexts following related to this one */ - int ncontig; -}; - -#define SPACC_CTRL_MASK(field) \ - (1UL << spacc->config.ctrl_map[(field)]) -#define SPACC_CTRL_SET(field, value) \ - ((value) << spacc->config.ctrl_map[(field)]) - -enum { - SPACC_CTRL_VER_0, - SPACC_CTRL_VER_1, - SPACC_CTRL_VER_2, - SPACC_CTRL_VER_SIZE -}; - -enum { - SPACC_CTRL_CIPH_ALG, - SPACC_CTRL_CIPH_MODE, - SPACC_CTRL_HASH_ALG, - SPACC_CTRL_HASH_MODE, - SPACC_CTRL_ENCRYPT, - SPACC_CTRL_CTX_IDX, - SPACC_CTRL_SEC_KEY, - SPACC_CTRL_AAD_COPY, - SPACC_CTRL_ICV_PT, - SPACC_CTRL_ICV_ENC, - SPACC_CTRL_ICV_APPEND, - SPACC_CTRL_KEY_EXP, - SPACC_CTRL_MSG_BEGIN, - SPACC_CTRL_MSG_END, - SPACC_CTRL_MAPSIZE -}; - -struct spacc_device { - void __iomem *regmap; - int zero_key; - - /* hardware configuration */ - struct { - unsigned int version, - pdu_version, - project; - uint32_t max_msg_size; /* max PROCLEN value */ - - unsigned char modes[CRYPTO_MODE_LAST]; - - int num_ctx, /* no. of contexts */ - num_sec_ctx, /* no. of SKP contexts*/ - sec_ctx_page_size, /* page size of SKP context in bytes*/ - ciph_page_size, /* cipher context page size in bytes*/ - hash_page_size, /* hash context page size in bytes*/ - string_size, - is_qos, /* QOS spacc? */ - is_pdu, /* PDU spacc? 
*/ - is_secure, - is_secure_port, /* Are we on the secure port? */ - is_partial, /* Is partial processing enabled? */ - is_ivimport, /* is ivimport enabled? */ - dma_type, /* DMA type: linear or scattergather */ - idx, /* Which virtual spacc IDX is this? */ - priority, /* Weighted priority of virtual spacc */ - cmd0_fifo_depth, /* CMD FIFO depths */ - cmd1_fifo_depth, - cmd2_fifo_depth, - stat_fifo_depth, /* depth of STATUS FIFO */ - fifo_cnt, - ideal_stat_level, - spacc_endian; - - uint32_t wd_timer; - u64 oldtimer, timer; - - const u8 *ctrl_map; /* map of ctrl register field offsets */ - } config; - - struct spacc_job_buffer { - int active; - int job_idx; - struct pdu_ddt *src, *dst; - u32 proc_sz, aad_offset, pre_aad_sz, - post_aad_sz, iv_offset, prio; - } job_buffer[SPACC_MAX_JOB_BUFFERS]; - - int jb_head, jb_tail; - - int op_mode, /* operating mode and watchdog functionality */ - wdcnt; /* number of pending WD IRQs*/ - - /* SW_ID value which will be used for next job. */ - unsigned int job_next_swid; - - struct spacc_ctx *ctx; /* This size changes per configured device */ - struct spacc_job *job; /* allocate memory for [SPACC_MAX_JOBS]; */ - int job_lookup[SPACC_MAX_JOBS]; /* correlate SW_ID back to job index */ - - spinlock_t lock; /* lock for register access */ - spinlock_t ctx_lock; /* lock for context manager */ - - /* callback functions for IRQ processing */ - void (*irq_cb_cmdx)(struct spacc_device *spacc, int x); - void (*irq_cb_stat)(struct spacc_device *spacc); - void (*irq_cb_stat_wd)(struct spacc_device *spacc); - - /* this is called after jobs have been popped off the STATUS FIFO - * useful so you can be told when there might be space available - * in the CMD FIFO - */ - void (*spacc_notify_jobs)(struct spacc_device *spacc); - - /* cache*/ - struct { - u32 src_ptr, - dst_ptr, - proc_len, - icv_len, - icv_offset, - pre_aad, - post_aad, - iv_offset, - offset, - aux; - } cache; - - struct device *dptr; -}; - -enum { - SPACC_IRQ_MODE_WD = 1, /* use 
WD*/ - SPACC_IRQ_MODE_STEP = 2 /* older use CMD/STAT stepping */ -}; - -enum { - SPACC_IRQ_CMD_GET = 0, - SPACC_IRQ_CMD_SET = 1 -}; - -struct spacc_priv { - struct spacc_device spacc; - struct semaphore core_running; - struct tasklet_struct pop_jobs; - spinlock_t hw_lock; - unsigned long max_msg_len; -}; - - -int spacc_open(struct spacc_device *spacc, int enc, int hash, int ctx, - int secure_mode, spacc_callback cb, void *cbdata); -int spacc_clone_handle(struct spacc_device *spacc, int old_handle, - void *cbdata); -int spacc_close(struct spacc_device *spacc, int job_idx); -int spacc_set_operation(struct spacc_device *spacc, int job_idx, int op, - u32 prot, uint32_t icvcmd, uint32_t icvoff, - uint32_t icvsz, uint32_t sec_key); -int spacc_set_key_exp(struct spacc_device *spacc, int job_idx); - -int spacc_packet_enqueue_ddt_ex(struct spacc_device *spacc, int use_jb, - int job_idx, struct pdu_ddt *src_ddt, struct pdu_ddt *dst_ddt, - u32 proc_sz, uint32_t aad_offset, uint32_t pre_aad_sz, - u32 post_aad_sz, uint32_t iv_offset, uint32_t prio); -int spacc_packet_enqueue_ddt(struct spacc_device *spacc, int job_idx, - struct pdu_ddt *src_ddt, struct pdu_ddt *dst_ddt, - uint32_t proc_sz, u32 aad_offset, uint32_t pre_aad_sz, - uint32_t post_aad_sz, u32 iv_offset, uint32_t prio); - -/* IRQ handling functions */ -void spacc_irq_cmdx_enable(struct spacc_device *spacc, int cmdx, int cmdx_cnt); -void spacc_irq_cmdx_disable(struct spacc_device *spacc, int cmdx); -void spacc_irq_stat_enable(struct spacc_device *spacc, int stat_cnt); -void spacc_irq_stat_disable(struct spacc_device *spacc); -void spacc_irq_stat_wd_enable(struct spacc_device *spacc); -void spacc_irq_stat_wd_disable(struct spacc_device *spacc); -void spacc_irq_glbl_enable(struct spacc_device *spacc); -void spacc_irq_glbl_disable(struct spacc_device *spacc); -uint32_t spacc_process_irq(struct spacc_device *spacc); -void spacc_set_wd_count(struct spacc_device *spacc, uint32_t val); -irqreturn_t spacc_irq_handler(int irq, 
void *dev); -int spacc_sgs_to_ddt(struct device *dev, - struct scatterlist *sg1, int len1, int *ents1, - struct scatterlist *sg2, int len2, int *ents2, - struct scatterlist *sg3, int len3, int *ents3, - struct pdu_ddt *ddt, int dma_direction); -int spacc_sg_to_ddt(struct device *dev, struct scatterlist *sg, - int nbytes, struct pdu_ddt *ddt, int dma_direction); - -/* Context Manager */ -void spacc_ctx_init_all(struct spacc_device *spacc); - -/* SPAcc specific manipulation of context memory */ -int spacc_write_context(struct spacc_device *spacc, int job_idx, int op, - const unsigned char *key, int ksz, - const unsigned char *iv, int ivsz); - -int spacc_read_context(struct spacc_device *spacc, int job_idx, int op, - unsigned char *key, int ksz, unsigned char *iv, - int ivsz); - -/* Job Manager */ -void spacc_job_init_all(struct spacc_device *spacc); -int spacc_job_request(struct spacc_device *dev, int job_idx); -int spacc_job_release(struct spacc_device *dev, int job_idx); -int spacc_handle_release(struct spacc_device *spacc, int job_idx); - -/* Helper functions */ -struct spacc_ctx *context_lookup_by_job(struct spacc_device *spacc, - int job_idx); -int spacc_isenabled(struct spacc_device *spacc, int mode, int keysize); -int spacc_compute_xcbc_key(struct spacc_device *spacc, int mode_id, - int job_idx, const unsigned char *key, - int keylen, unsigned char *xcbc_out); - -int spacc_process_jb(struct spacc_device *spacc); -int spacc_remove(struct platform_device *pdev); -int spacc_static_config(struct spacc_device *spacc); -int spacc_autodetect(struct spacc_device *spacc); -void spacc_pop_jobs(unsigned long data); -void spacc_fini(struct spacc_device *spacc); -int spacc_init(void __iomem *baseaddr, struct spacc_device *spacc, - struct pdu_info *info); -int spacc_pop_packets(struct spacc_device *spacc, int *num_popped); -void spacc_stat_process(struct spacc_device *spacc); -void spacc_cmd_process(struct spacc_device *spacc, int x); - -#endif diff --git 
a/drivers/crypto/dwc-spacc/spacc_device.c b/drivers/crypto/dwc-spacc/spacc_device.c deleted file mode 100644 index 964ccdf294e3..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_device.c +++ /dev/null @@ -1,338 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include -#include -#include "spacc_device.h" - -static struct platform_device *spacc_pdev[MAX_DEVICES]; - -#define VSPACC_PRIORITY_MAX 15 - -void spacc_cmd_process(struct spacc_device *spacc, int x) -{ - struct spacc_priv *priv = container_of(spacc, struct spacc_priv, spacc); - - /* run tasklet to pop jobs off fifo */ - tasklet_schedule(&priv->pop_jobs); -} -void spacc_stat_process(struct spacc_device *spacc) -{ - struct spacc_priv *priv = container_of(spacc, struct spacc_priv, spacc); - - /* run tasklet to pop jobs off fifo */ - tasklet_schedule(&priv->pop_jobs); -} - - -int spacc_probe(struct platform_device *pdev, - const struct of_device_id snps_spacc_id[]) -{ - int spacc_idx = -1; - struct resource *mem; - int spacc_endian = 0; - void __iomem *baseaddr; - struct pdu_info info; - int spacc_priority = -1; - struct spacc_priv *priv; - int x = 0, err, oldmode, irq_num; - const struct of_device_id *match, *id; - u64 oldtimer = 100000, timer = 100000; - - if (pdev->dev.of_node) { - id = of_match_node(snps_spacc_id, pdev->dev.of_node); - if (!id) { - dev_err(&pdev->dev, "DT node did not match\n"); - return -EINVAL; - } - } - - /* Initialize DDT DMA pools based on this device's resources */ - if (pdu_mem_init(&pdev->dev)) { - dev_err(&pdev->dev, "Could not initialize DMA pools\n"); - return -ENOMEM; - } - - match = of_match_device(of_match_ptr(snps_spacc_id), &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "SPAcc dtb missing"); - return -ENODEV; - } - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - dev_err(&pdev->dev, "no memory resource for spacc\n"); - err = -ENXIO; - goto free_ddt_mem_pool; - } - - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if 
(!priv) { - err = -ENOMEM; - goto free_ddt_mem_pool; - } - - /* Read spacc priority and index and save inside priv.spacc.config */ - if (of_property_read_u32(pdev->dev.of_node, "spacc_priority", - &spacc_priority)) { - dev_err(&pdev->dev, "No vspacc priority specified\n"); - err = -EINVAL; - goto free_ddt_mem_pool; - } - - if (spacc_priority < 0 && spacc_priority > VSPACC_PRIORITY_MAX) { - dev_err(&pdev->dev, "Invalid vspacc priority\n"); - err = -EINVAL; - goto free_ddt_mem_pool; - } - priv->spacc.config.priority = spacc_priority; - - if (of_property_read_u32(pdev->dev.of_node, "spacc_index", - &spacc_idx)) { - dev_err(&pdev->dev, "No vspacc index specified\n"); - err = -EINVAL; - goto free_ddt_mem_pool; - } - priv->spacc.config.idx = spacc_idx; - - if (of_property_read_u32(pdev->dev.of_node, "spacc_endian", - &spacc_endian)) { - dev_dbg(&pdev->dev, "No spacc_endian specified\n"); - dev_dbg(&pdev->dev, "Default spacc Endianness (0==little)\n"); - spacc_endian = 0; - } - priv->spacc.config.spacc_endian = spacc_endian; - - if (of_property_read_u64(pdev->dev.of_node, "oldtimer", - &oldtimer)) { - dev_dbg(&pdev->dev, "No oldtimer specified\n"); - dev_dbg(&pdev->dev, "Default oldtimer (100000)\n"); - oldtimer = 100000; - } - priv->spacc.config.oldtimer = oldtimer; - - if (of_property_read_u64(pdev->dev.of_node, "timer", &timer)) { - dev_dbg(&pdev->dev, "No timer specified\n"); - dev_dbg(&pdev->dev, "Default timer (100000)\n"); - timer = 100000; - } - priv->spacc.config.timer = timer; - - baseaddr = devm_ioremap_resource(&pdev->dev, mem); - if (IS_ERR(baseaddr)) { - dev_err(&pdev->dev, "unable to map iomem\n"); - err = PTR_ERR(baseaddr); - goto free_ddt_mem_pool; - } - - pdu_get_version(baseaddr, &info); - if (pdev->dev.platform_data) { - struct pdu_info *parent_info = pdev->dev.platform_data; - - memcpy(&info.pdu_config, &parent_info->pdu_config, - sizeof(info.pdu_config)); - } - - dev_dbg(&pdev->dev, "EPN %04X : virt [%d]\n", - info.spacc_version.project, - 
info.spacc_version.vspacc_idx); - - /* Validate virtual spacc index with vspacc count read from - * VERSION_EXT.VSPACC_CNT. Thus vspacc count=3, gives valid index 0,1,2 - */ - if (spacc_idx != info.spacc_version.vspacc_idx) { - dev_err(&pdev->dev, "DTS vspacc_idx mismatch read value\n"); - err = -EINVAL; - goto free_ddt_mem_pool; - } - - if (spacc_idx < 0 || spacc_idx > (info.spacc_config.num_vspacc - 1)) { - dev_err(&pdev->dev, "Invalid vspacc index specified\n"); - err = -EINVAL; - goto free_ddt_mem_pool; - } - - err = spacc_init(baseaddr, &priv->spacc, &info); - if (err != CRYPTO_OK) { - dev_err(&pdev->dev, "Failed to initialize device %d\n", x); - err = -ENXIO; - goto free_ddt_mem_pool; - } - - spin_lock_init(&priv->hw_lock); - spacc_irq_glbl_disable(&priv->spacc); - tasklet_init(&priv->pop_jobs, spacc_pop_jobs, (unsigned long)priv); - - priv->spacc.dptr = &pdev->dev; - platform_set_drvdata(pdev, priv); - - irq_num = platform_get_irq(pdev, 0); - if (irq_num < 0) { - dev_err(&pdev->dev, "no irq resource for spacc\n"); - err = -ENXIO; - goto free_ddt_mem_pool; - } - - /* Determine configured maximum message length. 
*/ - priv->max_msg_len = priv->spacc.config.max_msg_size; - - if (devm_request_irq(&pdev->dev, irq_num, spacc_irq_handler, - IRQF_SHARED, dev_name(&pdev->dev), - &pdev->dev)) { - dev_err(&pdev->dev, "failed to request IRQ\n"); - err = -EBUSY; - goto err_tasklet_kill; - } - - priv->spacc.irq_cb_stat = spacc_stat_process; - priv->spacc.irq_cb_cmdx = spacc_cmd_process; - oldmode = priv->spacc.op_mode; - priv->spacc.op_mode = SPACC_OP_MODE_IRQ; - - spacc_irq_stat_enable(&priv->spacc, 1); - spacc_irq_cmdx_enable(&priv->spacc, 0, 1); - spacc_irq_stat_wd_disable(&priv->spacc); - spacc_irq_glbl_enable(&priv->spacc); - - -#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AUTODETECT) - err = spacc_autodetect(&priv->spacc); - if (err < 0) { - spacc_irq_glbl_disable(&priv->spacc); - goto err_tasklet_kill; - } -#else - err = spacc_static_config(&priv->spacc); - if (err < 0) { - spacc_irq_glbl_disable(&priv->spacc); - goto err_tasklet_kill; - } -#endif - - priv->spacc.op_mode = oldmode; - - if (priv->spacc.op_mode == SPACC_OP_MODE_IRQ) { - priv->spacc.irq_cb_stat = spacc_stat_process; - priv->spacc.irq_cb_cmdx = spacc_cmd_process; - - spacc_irq_stat_enable(&priv->spacc, 1); - spacc_irq_cmdx_enable(&priv->spacc, 0, 1); - spacc_irq_glbl_enable(&priv->spacc); - } else { - priv->spacc.irq_cb_stat = spacc_stat_process; - priv->spacc.irq_cb_stat_wd = spacc_stat_process; - - spacc_irq_stat_enable(&priv->spacc, - priv->spacc.config.ideal_stat_level); - - spacc_irq_cmdx_disable(&priv->spacc, 0); - spacc_irq_stat_wd_enable(&priv->spacc); - spacc_irq_glbl_enable(&priv->spacc); - - /* enable the wd by setting the wd_timer = 100000 */ - spacc_set_wd_count(&priv->spacc, - priv->spacc.config.wd_timer = - priv->spacc.config.timer); - } - - /* unlock normal*/ - if (priv->spacc.config.is_secure_port) { - u32 t; - - t = readl(baseaddr + SPACC_REG_SECURE_CTRL); - t &= ~(1UL << 31); - writel(t, baseaddr + SPACC_REG_SECURE_CTRL); - } - - /* unlock device by default */ - writel(0, baseaddr + 
SPACC_REG_SECURE_CTRL); - - return err; - -err_tasklet_kill: - tasklet_kill(&priv->pop_jobs); - spacc_fini(&priv->spacc); - -free_ddt_mem_pool: - pdu_mem_deinit(&pdev->dev); - - return err; -} - -static void spacc_unregister_algs(void) -{ -#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_HASH) - spacc_unregister_hash_algs(); -#endif -#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AEAD) - spacc_unregister_aead_algs(); -#endif -#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_CIPHER) - spacc_unregister_cipher_algs(); -#endif -} - -static const struct of_device_id snps_spacc_id[] = { - {.compatible = "snps-dwc-spacc" }, - { /*sentinel */ } -}; - -MODULE_DEVICE_TABLE(of, snps_spacc_id); - -static int spacc_crypto_probe(struct platform_device *pdev) -{ - int rc; - - rc = spacc_probe(pdev, snps_spacc_id); - if (rc < 0) - goto err; - - spacc_pdev[0] = pdev; - -#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_HASH) - rc = probe_hashes(pdev); - if (rc < 0) - goto err; -#endif - -#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_CIPHER) - rc = probe_ciphers(pdev); - if (rc < 0) - goto err; -#endif - -#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AEAD) - rc = probe_aeads(pdev); - if (rc < 0) - goto err; -#endif - - return 0; -err: - spacc_unregister_algs(); - - return rc; -} - -static void spacc_crypto_remove(struct platform_device *pdev) -{ - spacc_unregister_algs(); - spacc_remove(pdev); -} - -static struct platform_driver spacc_driver = { - .probe = spacc_crypto_probe, - .remove = spacc_crypto_remove, - .driver = { - .name = "spacc", - .of_match_table = of_match_ptr(snps_spacc_id), - .owner = THIS_MODULE, - }, -}; - -module_platform_driver(spacc_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Synopsys, Inc."); -MODULE_DESCRIPTION("SPAcc Crypto Accelerator Driver"); diff --git a/drivers/crypto/dwc-spacc/spacc_device.h b/drivers/crypto/dwc-spacc/spacc_device.h deleted file mode 100644 index be7fde25046b..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_device.h +++ /dev/null @@ -1,231 +0,0 @@ -/* SPDX-License-Identifier: 
GPL-2.0 */ - -#ifndef SPACC_DEVICE_H_ -#define SPACC_DEVICE_H_ - -#include -#include -#include -#include -#include "spacc_core.h" - -#define MODE_TAB_AEAD(_name, _ciph, _hash, _hashlen, _ivlen, _blocklen) \ - .name = _name, .aead = { .ciph = _ciph, .hash = _hash }, \ - .hashlen = _hashlen, .ivlen = _ivlen, .blocklen = _blocklen - -/* Helper macros for initializing the hash/cipher tables. */ -#define MODE_TAB_COMMON(_name, _id_name, _blocklen) \ - .name = _name, .id = CRYPTO_MODE_##_id_name, .blocklen = _blocklen - -#define MODE_TAB_HASH(_name, _id_name, _hashlen, _blocklen) \ - MODE_TAB_COMMON(_name, _id_name, _blocklen), \ - .hashlen = _hashlen, .testlen = _hashlen - -#define MODE_TAB_CIPH(_name, _id_name, _ivlen, _blocklen) \ - MODE_TAB_COMMON(_name, _id_name, _blocklen), \ - .ivlen = _ivlen - -#define MODE_TAB_HASH_XCBC 0x8000 - -#define SPACC_MAX_DIGEST_SIZE 64 -#define SPACC_MAX_KEY_SIZE 32 -#define SPACC_MAX_IV_SIZE 16 - -#define SPACC_DMA_ALIGN 4 -#define SPACC_DMA_BOUNDARY 0x10000 - -#define MAX_DEVICES 2 -/* flag means the IV is computed from setkey and crypt*/ -#define SPACC_MANGLE_IV_FLAG 0x8000 - -/* we're doing a CTR mangle (for RFC3686/IPsec)*/ -#define SPACC_MANGLE_IV_RFC3686 0x0100 - -/* we're doing GCM */ -#define SPACC_MANGLE_IV_RFC4106 0x0200 - -/* we're doing GMAC */ -#define SPACC_MANGLE_IV_RFC4543 0x0300 - -/* we're doing CCM */ -#define SPACC_MANGLE_IV_RFC4309 0x0400 - -/* we're doing SM4 GCM/CCM */ -#define SPACC_MANGLE_IV_RFC8998 0x0500 - -#define CRYPTO_MODE_AES_CTR_RFC3686 (CRYPTO_MODE_AES_CTR \ - | SPACC_MANGLE_IV_FLAG \ - | SPACC_MANGLE_IV_RFC3686) -#define CRYPTO_MODE_AES_GCM_RFC4106 (CRYPTO_MODE_AES_GCM \ - | SPACC_MANGLE_IV_FLAG \ - | SPACC_MANGLE_IV_RFC4106) -#define CRYPTO_MODE_AES_GCM_RFC4543 (CRYPTO_MODE_AES_GCM \ - | SPACC_MANGLE_IV_FLAG \ - | SPACC_MANGLE_IV_RFC4543) -#define CRYPTO_MODE_AES_CCM_RFC4309 (CRYPTO_MODE_AES_CCM \ - | SPACC_MANGLE_IV_FLAG \ - | SPACC_MANGLE_IV_RFC4309) -#define CRYPTO_MODE_SM4_GCM_RFC8998 
(CRYPTO_MODE_SM4_GCM) -#define CRYPTO_MODE_SM4_CCM_RFC8998 (CRYPTO_MODE_SM4_CCM) - -struct spacc_crypto_ctx { - struct device *dev; - - spinlock_t lock; - struct list_head jobs; - int handle, mode, auth_size, key_len; - unsigned char *cipher_key; - - /* - * Indicates that the H/W context has been setup and can be used for - * crypto; otherwise, the software fallback will be used. - */ - bool ctx_valid; - unsigned int flag_ppp; - - /* salt used for rfc3686/givencrypt mode */ - unsigned char csalt[16]; - u8 ipad[128] __aligned(sizeof(u32)); - u8 digest_ctx_buf[128] __aligned(sizeof(u32)); - u8 tmp_buffer[128] __aligned(sizeof(u32)); - - /* Save keylen from setkey */ - int keylen; - u8 key[256]; - int zero_key; - unsigned char *tmp_sgl_buff; - struct scatterlist *tmp_sgl; - - union{ - struct crypto_ahash *hash; - struct crypto_aead *aead; - struct crypto_skcipher *cipher; - } fb; -}; - -struct spacc_crypto_reqctx { - struct pdu_ddt src, dst; - void *digest_buf, *iv_buf; - dma_addr_t digest_dma; - int dst_nents, src_nents, aead_nents, total_nents; - int encrypt_op, mode, single_shot; - unsigned int spacc_cipher_cryptlen, rem_nents; - - struct aead_cb_data { - int new_handle; - struct spacc_crypto_ctx *tctx; - struct spacc_crypto_reqctx *ctx; - struct aead_request *req; - struct spacc_device *spacc; - } cb; - - struct ahash_cb_data { - int new_handle; - struct spacc_crypto_ctx *tctx; - struct spacc_crypto_reqctx *ctx; - struct ahash_request *req; - struct spacc_device *spacc; - } acb; - - struct cipher_cb_data { - int new_handle; - struct spacc_crypto_ctx *tctx; - struct spacc_crypto_reqctx *ctx; - struct skcipher_request *req; - struct spacc_device *spacc; - } ccb; - - union { - struct ahash_request hash_req; - struct skcipher_request cipher_req; - struct aead_request aead_req; - } fb; -}; - -struct mode_tab { - char name[128]; - - int valid; - - /* mode ID used in hash/cipher mode but not aead*/ - int id; - - /* ciph/hash mode used in aead */ - struct { - int ciph, 
hash; - } aead; - - unsigned int hashlen, ivlen, blocklen, keylen[3]; - unsigned int keylen_mask, testlen; - unsigned int chunksize, walksize, min_keysize, max_keysize; - - bool sw_fb; - - union { - unsigned char hash_test[SPACC_MAX_DIGEST_SIZE]; - unsigned char ciph_test[3][2 * SPACC_MAX_IV_SIZE]; - }; -}; - -struct spacc_alg { - struct mode_tab *mode; - unsigned int keylen_mask; - - struct device *dev[MAX_DEVICES]; - - struct list_head list; - struct crypto_alg *calg; - struct crypto_tfm *tfm; - - union { - struct ahash_alg hash; - struct aead_alg aead; - struct skcipher_alg skcipher; - } alg; -}; - -static inline const struct spacc_alg *spacc_tfm_ahash(struct crypto_tfm *tfm) -{ - const struct crypto_alg *calg = tfm->__crt_alg; - - if ((calg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) - return container_of(calg, struct spacc_alg, alg.hash.halg.base); - - return NULL; -} - -static inline const struct spacc_alg *spacc_tfm_skcipher(struct crypto_tfm *tfm) -{ - const struct crypto_alg *calg = tfm->__crt_alg; - - if ((calg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_SKCIPHER) - return container_of(calg, struct spacc_alg, alg.skcipher.base); - - return NULL; -} - -static inline const struct spacc_alg *spacc_tfm_aead(struct crypto_tfm *tfm) -{ - const struct crypto_alg *calg = tfm->__crt_alg; - - if ((calg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AEAD) - return container_of(calg, struct spacc_alg, alg.aead.base); - - return NULL; -} - -int probe_hashes(struct platform_device *spacc_pdev); -int spacc_unregister_hash_algs(void); - -int probe_aeads(struct platform_device *spacc_pdev); -int spacc_unregister_aead_algs(void); - -int probe_ciphers(struct platform_device *spacc_pdev); -int spacc_unregister_cipher_algs(void); - -int spacc_probe(struct platform_device *pdev, - const struct of_device_id snps_spacc_id[]); - -irqreturn_t spacc_irq_handler(int irq, void *dev); -#endif diff --git a/drivers/crypto/dwc-spacc/spacc_hal.c 
b/drivers/crypto/dwc-spacc/spacc_hal.c deleted file mode 100644 index 0d460c4df542..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_hal.c +++ /dev/null @@ -1,367 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include "spacc_hal.h" - -static struct dma_pool *ddt_pool, *ddt16_pool, *ddt4_pool; -static struct device *ddt_device; - -#define PDU_REG_SPACC_VERSION 0x00180UL -#define PDU_REG_SPACC_CONFIG 0x00184UL -#define PDU_REG_SPACC_CONFIG2 0x00190UL -#define PDU_REG_SPACC_IV_OFFSET 0x00040UL -#define PDU_REG_PDU_CONFIG 0x00188UL -#define PDU_REG_SECURE_LOCK 0x001C0UL - -int pdu_get_version(void __iomem *dev, struct pdu_info *inf) -{ - unsigned long tmp; - - if (!inf) - return -1; - - memset(inf, 0, sizeof(*inf)); - tmp = readl(dev + PDU_REG_SPACC_VERSION); - - /* Read the SPAcc version block this tells us the revision, - * project, and a few other feature bits - * - * layout for v6.5+ - */ - inf->spacc_version = (struct spacc_version_block) { - .minor = SPACC_ID_MINOR(tmp), - .major = SPACC_ID_MAJOR(tmp), - .version = (SPACC_ID_MAJOR(tmp) << 4) | SPACC_ID_MINOR(tmp), - .qos = SPACC_ID_QOS(tmp), - .is_spacc = SPACC_ID_TYPE(tmp) == SPACC_TYPE_SPACCQOS, - .is_pdu = SPACC_ID_TYPE(tmp) == SPACC_TYPE_PDU, - .aux = SPACC_ID_AUX(tmp), - .vspacc_idx = SPACC_ID_VIDX(tmp), - .partial = SPACC_ID_PARTIAL(tmp), - .project = SPACC_ID_PROJECT(tmp), - }; - - /* try to autodetect */ - writel(0x80000000, dev + PDU_REG_SPACC_IV_OFFSET); - - if (readl(dev + PDU_REG_SPACC_IV_OFFSET) == 0x80000000) - inf->spacc_version.ivimport = 1; - else - inf->spacc_version.ivimport = 0; - - - /* Read the SPAcc config block (v6.5+) which tells us how many - * contexts there are and context page sizes - * this register is only available in v6.5 and up - */ - tmp = readl(dev + PDU_REG_SPACC_CONFIG); - inf->spacc_config = (struct spacc_config_block) { - SPACC_CFG_CTX_CNT(tmp), - SPACC_CFG_VSPACC_CNT(tmp), - SPACC_CFG_CIPH_CTX_SZ(tmp), - SPACC_CFG_HASH_CTX_SZ(tmp), - 
SPACC_CFG_DMA_TYPE(tmp), - 0, 0, 0, 0 - }; - - /* CONFIG2 only present in v6.5+ cores */ - tmp = readl(dev + PDU_REG_SPACC_CONFIG2); - if (inf->spacc_version.qos) { - inf->spacc_config.cmd0_fifo_depth = - SPACC_CFG_CMD0_FIFO_QOS(tmp); - inf->spacc_config.cmd1_fifo_depth = - SPACC_CFG_CMD1_FIFO(tmp); - inf->spacc_config.cmd2_fifo_depth = - SPACC_CFG_CMD2_FIFO(tmp); - inf->spacc_config.stat_fifo_depth = - SPACC_CFG_STAT_FIFO_QOS(tmp); - } else { - inf->spacc_config.cmd0_fifo_depth = - SPACC_CFG_CMD0_FIFO(tmp); - inf->spacc_config.stat_fifo_depth = - SPACC_CFG_STAT_FIFO(tmp); - } - - /* only read PDU config if it's actually a PDU engine */ - if (inf->spacc_version.is_pdu) { - tmp = readl(dev + PDU_REG_PDU_CONFIG); - inf->pdu_config = (struct pdu_config_block) - {SPACC_PDU_CFG_MINOR(tmp), - SPACC_PDU_CFG_MAJOR(tmp)}; - - /* unlock all cores by default */ - writel(0, dev + PDU_REG_SECURE_LOCK); - } - - return 0; -} - -void pdu_to_dev(void __iomem *addr_, uint32_t *src, unsigned long nword) -{ - void __iomem *addr = addr_; - - while (nword--) { - writel(*src++, addr); - addr += 4; - } -} - -void pdu_from_dev(u32 *dst, void __iomem *addr_, unsigned long nword) -{ - void __iomem *addr = addr_; - - while (nword--) { - *dst++ = readl(addr); - addr += 4; - } -} - -static void pdu_to_dev_big(void __iomem *addr_, const unsigned char *src, - unsigned long nword) -{ - unsigned long v; - void __iomem *addr = addr_; - - while (nword--) { - v = 0; - v = (v << 8) | ((unsigned long)*src++); - v = (v << 8) | ((unsigned long)*src++); - v = (v << 8) | ((unsigned long)*src++); - v = (v << 8) | ((unsigned long)*src++); - writel(v, addr); - addr += 4; - } -} - -static void pdu_from_dev_big(unsigned char *dst, void __iomem *addr_, - unsigned long nword) -{ - unsigned long v; - void __iomem *addr = addr_; - - while (nword--) { - v = readl(addr); - addr += 4; - *dst++ = (v >> 24) & 0xFF; v <<= 8; - *dst++ = (v >> 24) & 0xFF; v <<= 8; - *dst++ = (v >> 24) & 0xFF; v <<= 8; - *dst++ = (v >> 24) & 
0xFF; v <<= 8; - } -} - -static void pdu_to_dev_little(void __iomem *addr_, const unsigned char *src, - unsigned long nword) -{ - unsigned long v; - void __iomem *addr = addr_; - - while (nword--) { - v = 0; - v = (v >> 8) | ((unsigned long)*src++ << 24UL); - v = (v >> 8) | ((unsigned long)*src++ << 24UL); - v = (v >> 8) | ((unsigned long)*src++ << 24UL); - v = (v >> 8) | ((unsigned long)*src++ << 24UL); - writel(v, addr); - addr += 4; - } -} - -static void pdu_from_dev_little(unsigned char *dst, void __iomem *addr_, - unsigned long nword) -{ - unsigned long v; - void __iomem *addr = addr_; - - while (nword--) { - v = readl(addr); - addr += 4; - *dst++ = v & 0xFF; v >>= 8; - *dst++ = v & 0xFF; v >>= 8; - *dst++ = v & 0xFF; v >>= 8; - *dst++ = v & 0xFF; v >>= 8; - } -} - -void pdu_to_dev_s(void __iomem *addr, const unsigned char *src, - unsigned long nword, int endian) -{ - if (endian) - pdu_to_dev_big(addr, src, nword); - else - pdu_to_dev_little(addr, src, nword); -} - -void pdu_from_dev_s(unsigned char *dst, void __iomem *addr, - unsigned long nword, int endian) -{ - if (endian) - pdu_from_dev_big(dst, addr, nword); - else - pdu_from_dev_little(dst, addr, nword); -} - -void pdu_io_cached_write(void __iomem *addr, unsigned long val, - uint32_t *cache) -{ - if (*cache == val) { -#ifdef CONFIG_CRYPTO_DEV_SPACC_DEBUG_TRACE_IO - pr_debug("PDU: write %.8lx -> %p (cached)\n", val, addr); -#endif - return; - } - - *cache = val; - writel(val, addr); -} - -struct device *get_ddt_device(void) -{ - return ddt_device; -} - -/* Platform specific DDT routines */ - -/* create a DMA pool for DDT entries this should help from splitting - * pages for DDTs which by default are 520 bytes long meaning we would - * otherwise waste 3576 bytes per DDT allocated... - * we also maintain a smaller table of 4 entries common for simple jobs - * which uses 480 fewer bytes of DMA memory. 
- * and for good measure another table for 16 entries saving 384 bytes - */ -int pdu_mem_init(void *device) -{ - if (ddt_device) - return 0; /* Already setup */ - - ddt_device = device; - ddt_pool = dma_pool_create("spaccddt", device, (PDU_MAX_DDT + 1) * 8, - 8, 0); /* max of 64 DDT entries */ - - if (!ddt_pool) - return -1; - -#if PDU_MAX_DDT > 16 - /* max of 16 DDT entries */ - ddt16_pool = dma_pool_create("spaccddt16", device, (16 + 1) * 8, 8, 0); - if (!ddt16_pool) { - dma_pool_destroy(ddt_pool); - return -1; - } -#else - ddt16_pool = ddt_pool; -#endif - /* max of 4 DDT entries */ - ddt4_pool = dma_pool_create("spaccddt4", device, (4 + 1) * 8, 8, 0); - if (!ddt4_pool) { - dma_pool_destroy(ddt_pool); -#if PDU_MAX_DDT > 16 - dma_pool_destroy(ddt16_pool); -#endif - return -1; - } - - return 0; -} - -/* destroy the pool */ -void pdu_mem_deinit(void *device) -{ - /* For now, just skip deinit except for matching device */ - if (device != ddt_device) - return; - - dma_pool_destroy(ddt_pool); - -#if PDU_MAX_DDT > 16 - dma_pool_destroy(ddt16_pool); -#endif - dma_pool_destroy(ddt4_pool); - - ddt_device = NULL; -} - -int pdu_ddt_init(struct pdu_ddt *ddt, unsigned long limit) -{ - /* set the MSB if we want to use an ATOMIC - * allocation required for top half processing - */ - int flag = (limit & 0x80000000); - - limit &= 0x7FFFFFFF; - if (limit + 1 >= SIZE_MAX / 8) { - /* Too big to even compute DDT size */ - return -1; - } else if (limit > PDU_MAX_DDT) { - size_t len = 8 * ((size_t)limit + 1); - - ddt->virt = dma_alloc_coherent(ddt_device, len, &ddt->phys, - flag ? GFP_ATOMIC : GFP_KERNEL); - } else if (limit > 16) { - ddt->virt = dma_pool_alloc(ddt_pool, flag ? GFP_ATOMIC : - GFP_KERNEL, &ddt->phys); - } else if (limit > 4) { - ddt->virt = dma_pool_alloc(ddt16_pool, flag ? GFP_ATOMIC : - GFP_KERNEL, &ddt->phys); - } else { - ddt->virt = dma_pool_alloc(ddt4_pool, flag ? 
GFP_ATOMIC : - GFP_KERNEL, &ddt->phys); - } - - ddt->idx = 0; - ddt->len = 0; - ddt->limit = limit; - - if (!ddt->virt) - return -1; - -#ifdef CONFIG_CRYPTO_DEV_SPACC_DEBUG_TRACE_DDT - pr_debug(" DDT[%.8lx]: allocated %lu fragments\n", - (unsigned long)ddt->phys, limit); -#endif - - return 0; -} - -int pdu_ddt_add(struct pdu_ddt *ddt, dma_addr_t phys, unsigned long size) -{ -#ifdef CONFIG_CRYPTO_DEV_SPACC_DEBUG_TRACE_DDT - pr_debug(" DDT[%.8lx]: 0x%.8lx size %lu\n", - (unsigned long)ddt->phys, - (unsigned long)phys, size); -#endif - - if (ddt->idx == ddt->limit) - return -1; - - ddt->virt[ddt->idx * 2 + 0] = (uint32_t)phys; - ddt->virt[ddt->idx * 2 + 1] = size; - ddt->virt[ddt->idx * 2 + 2] = 0; - ddt->virt[ddt->idx * 2 + 3] = 0; - ddt->len += size; - ++(ddt->idx); - - return 0; -} - -int pdu_ddt_free(struct pdu_ddt *ddt) -{ - if (ddt->virt) { - if (ddt->limit > PDU_MAX_DDT) { - size_t len = 8 * ((size_t)ddt->limit + 1); - - dma_free_coherent(ddt_device, len, ddt->virt, - ddt->phys); - } else if (ddt->limit > 16) { - dma_pool_free(ddt_pool, ddt->virt, ddt->phys); - } else if (ddt->limit > 4) { - dma_pool_free(ddt16_pool, ddt->virt, ddt->phys); - } else { - dma_pool_free(ddt4_pool, ddt->virt, ddt->phys); - } - - ddt->virt = NULL; - } - - return 0; -} diff --git a/drivers/crypto/dwc-spacc/spacc_hal.h b/drivers/crypto/dwc-spacc/spacc_hal.h deleted file mode 100644 index 8b817f993f3d..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_hal.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef SPACC_HAL_H -#define SPACC_HAL_H - -/* Maximum number of DDT entries allowed*/ -#ifndef PDU_MAX_DDT -#define PDU_MAX_DDT 64 -#endif - -/* Platform Generic */ -#define PDU_IRQ_EN_GLBL BIT(31) -#define PDU_IRQ_EN_VSPACC(x) (1UL << (x)) -#define PDU_IRQ_EN_RNG BIT(16) - -#ifndef SPACC_ID_MINOR - #define SPACC_ID_MINOR(x) ((x) & 0x0F) - #define SPACC_ID_MAJOR(x) (((x) >> 4) & 0x0F) - #define SPACC_ID_QOS(x) (((x) >> 8) & 0x01) - #define SPACC_ID_TYPE(x) 
(((x) >> 9) & 0x03) - #define SPACC_ID_AUX(x) (((x) >> 11) & 0x01) - #define SPACC_ID_VIDX(x) (((x) >> 12) & 0x07) - #define SPACC_ID_PARTIAL(x) (((x) >> 15) & 0x01) - #define SPACC_ID_PROJECT(x) ((x) >> 16) - - #define SPACC_TYPE_SPACCQOS 0 - #define SPACC_TYPE_PDU 1 - - #define SPACC_CFG_CTX_CNT(x) ((x) & 0x7F) - #define SPACC_CFG_RC4_CTX_CNT(x) (((x) >> 8) & 0x7F) - #define SPACC_CFG_VSPACC_CNT(x) (((x) >> 16) & 0x0F) - #define SPACC_CFG_CIPH_CTX_SZ(x) (((x) >> 20) & 0x07) - #define SPACC_CFG_HASH_CTX_SZ(x) (((x) >> 24) & 0x0F) - #define SPACC_CFG_DMA_TYPE(x) (((x) >> 28) & 0x03) - - #define SPACC_CFG_CMD0_FIFO_QOS(x) (((x) >> 0) & 0x7F) - #define SPACC_CFG_CMD0_FIFO(x) (((x) >> 0) & 0x1FF) - #define SPACC_CFG_CMD1_FIFO(x) (((x) >> 8) & 0x7F) - #define SPACC_CFG_CMD2_FIFO(x) (((x) >> 16) & 0x7F) - #define SPACC_CFG_STAT_FIFO_QOS(x) (((x) >> 24) & 0x7F) - #define SPACC_CFG_STAT_FIFO(x) (((x) >> 16) & 0x1FF) - - #define SPACC_PDU_CFG_MINOR(x) ((x) & 0x0F) - #define SPACC_PDU_CFG_MAJOR(x) (((x) >> 4) & 0x0F) - - #define PDU_SECURE_LOCK_SPACC(x) (x) - #define PDU_SECURE_LOCK_CFG BIT(30) - #define PDU_SECURE_LOCK_GLBL BIT(31) -#endif /* SPACC_ID_MINOR */ - -#define CRYPTO_OK (0) - -struct spacc_version_block { - unsigned int minor, - major, - version, - qos, - is_spacc, - is_pdu, - aux, - vspacc_idx, - partial, - project, - ivimport; -}; - -struct spacc_config_block { - unsigned int num_ctx, - num_vspacc, - ciph_ctx_page_size, - hash_ctx_page_size, - dma_type, - cmd0_fifo_depth, - cmd1_fifo_depth, - cmd2_fifo_depth, - stat_fifo_depth; -}; - -struct pdu_config_block { - unsigned int minor, - major; -}; - -struct pdu_info { - u32 clockrate; - struct spacc_version_block spacc_version; - struct spacc_config_block spacc_config; - struct pdu_config_block pdu_config; -}; - -struct pdu_ddt { - dma_addr_t phys; - u32 *virt; - u32 *virt_orig; - unsigned long idx, limit, len; -}; - -void pdu_io_cached_write(void __iomem *addr, unsigned long val, - uint32_t *cache); -void 
pdu_to_dev(void __iomem *addr, uint32_t *src, unsigned long nword); -void pdu_from_dev(u32 *dst, void __iomem *addr, unsigned long nword); -void pdu_from_dev_s(unsigned char *dst, void __iomem *addr, unsigned long nword, - int endian); -void pdu_to_dev_s(void __iomem *addr, const unsigned char *src, - unsigned long nword, int endian); -struct device *get_ddt_device(void); -int pdu_mem_init(void *device); -void pdu_mem_deinit(void *device); -int pdu_ddt_init(struct pdu_ddt *ddt, unsigned long limit); -int pdu_ddt_add(struct pdu_ddt *ddt, dma_addr_t phys, unsigned long size); -int pdu_ddt_free(struct pdu_ddt *ddt); -int pdu_get_version(void __iomem *dev, struct pdu_info *inf); - -#endif diff --git a/drivers/crypto/dwc-spacc/spacc_interrupt.c b/drivers/crypto/dwc-spacc/spacc_interrupt.c deleted file mode 100644 index 176b3d6be25d..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_interrupt.c +++ /dev/null @@ -1,316 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include "spacc_core.h" - -/* Read the IRQ status register and process as needed */ - - -void spacc_disable_int (struct spacc_device *spacc); - -static inline uint32_t _spacc_get_stat_cnt(struct spacc_device *spacc) -{ - u32 fifo; - - if (spacc->config.is_qos) - fifo = SPACC_FIFO_STAT_STAT_CNT_GET_QOS(readl(spacc->regmap + - SPACC_REG_FIFO_STAT)); - else - fifo = SPACC_FIFO_STAT_STAT_CNT_GET(readl(spacc->regmap + - SPACC_REG_FIFO_STAT)); - - return fifo; -} - -static int spacc_pop_packets_ex(struct spacc_device *spacc, int *num_popped, - unsigned long *lock_flag) -{ - int jobs; - int ret = -EINPROGRESS; - struct spacc_job *job = NULL; - u32 cmdstat, swid, spacc_errcode = SPACC_OK; - - *num_popped = 0; - - while ((jobs = _spacc_get_stat_cnt(spacc))) { - while (jobs-- > 0) { - /* write the pop register to get the next job */ - writel(1, spacc->regmap + SPACC_REG_STAT_POP); - cmdstat = readl(spacc->regmap + SPACC_REG_STATUS); - - swid = SPACC_STATUS_SW_ID_GET(cmdstat); - - if 
(spacc->job_lookup[swid] == SPACC_JOB_IDX_UNUSED) { - ret = -EIO; - goto ERR; - } - - /* find the associated job with popped swid */ - if (swid < 0 || swid >= SPACC_MAX_JOBS) - job = NULL; - else - job = &spacc->job[spacc->job_lookup[swid]]; - - if (!job) { - ret = -EIO; - goto ERR; - } - - /* mark job as done */ - job->job_done = 1; - spacc->job_lookup[swid] = SPACC_JOB_IDX_UNUSED; - spacc_errcode = SPACC_GET_STATUS_RET_CODE(cmdstat); - - switch (spacc_errcode) { - case SPACC_ICVFAIL: - ret = -EBADMSG; - break; - case SPACC_MEMERR: - ret = -EINVAL; - break; - case SPACC_BLOCKERR: - ret = -EINVAL; - break; - case SPACC_SECERR: - ret = -EIO; - break; - case SPACC_OK: - ret = CRYPTO_OK; - break; - default: - pr_debug("Invalid SPAcc Error"); - } - - job->job_err = ret; - - /* - * We're done touching the SPAcc hw, so release the - * lock across the job callback. It must be reacquired - * before continuing to the next iteration. - */ - - if (job->cb) { - spin_unlock_irqrestore(&spacc->lock, - *lock_flag); - job->cb(spacc, job->cbdata); - spin_lock_irqsave(&spacc->lock, - *lock_flag); - } - - (*num_popped)++; - } - } - - if (!*num_popped) - pr_debug(" Failed to pop a single job\n"); - -ERR: - spacc_process_jb(spacc); - - /* reset the WD timer to the original value*/ - if (spacc->op_mode == SPACC_OP_MODE_WD) - spacc_set_wd_count(spacc, spacc->config.wd_timer); - - if (*num_popped && spacc->spacc_notify_jobs) - spacc->spacc_notify_jobs(spacc); - - return ret; -} - -int spacc_pop_packets(struct spacc_device *spacc, int *num_popped) -{ - int err; - unsigned long lock_flag; - - spin_lock_irqsave(&spacc->lock, lock_flag); - err = spacc_pop_packets_ex(spacc, num_popped, &lock_flag); - spin_unlock_irqrestore(&spacc->lock, lock_flag); - - return err; -} - -uint32_t spacc_process_irq(struct spacc_device *spacc) -{ - u32 temp; - int x, cmd_max; - unsigned long lock_flag; - - spin_lock_irqsave(&spacc->lock, lock_flag); - - temp = readl(spacc->regmap + SPACC_REG_IRQ_STAT); - - /* 
clear interrupt pin and run registered callback */ - if (temp & SPACC_IRQ_STAT_STAT) { - SPACC_IRQ_STAT_CLEAR_STAT(spacc); - if (spacc->op_mode == SPACC_OP_MODE_IRQ) { - spacc->config.fifo_cnt <<= 2; - if (spacc->config.fifo_cnt >= - spacc->config.stat_fifo_depth) - spacc->config.fifo_cnt = - spacc->config.stat_fifo_depth; - - /* update fifo count to allow more stati to pile up*/ - spacc_irq_stat_enable(spacc, spacc->config.fifo_cnt); - /* reenable CMD0 empty interrupt*/ - spacc_irq_cmdx_enable(spacc, 0, 0); - } - - if (spacc->irq_cb_stat) - spacc->irq_cb_stat(spacc); - } - - /* Watchdog IRQ */ - if (spacc->op_mode == SPACC_OP_MODE_WD) { - if (temp & SPACC_IRQ_STAT_STAT_WD) { - if (++spacc->wdcnt == SPACC_WD_LIMIT) { - /* this happens when you get too many IRQs that - * go unanswered - */ - spacc_irq_stat_wd_disable(spacc); - /* we set the STAT CNT to 1 so that every job - * generates an IRQ now - */ - spacc_irq_stat_enable(spacc, 1); - spacc->op_mode = SPACC_OP_MODE_IRQ; - } else if (spacc->config.wd_timer < (0xFFFFFFUL >> 4)) { - /* if the timer isn't too high lets bump it up - * a bit so as to give the IRQ a chance to - * reply - */ - spacc_set_wd_count(spacc, - spacc->config.wd_timer << 4); - } - - SPACC_IRQ_STAT_CLEAR_STAT_WD(spacc); - if (spacc->irq_cb_stat_wd) - spacc->irq_cb_stat_wd(spacc); - } - } - - if (spacc->op_mode == SPACC_OP_MODE_IRQ) { - cmd_max = (spacc->config.is_qos ? 
SPACC_CMDX_MAX_QOS : - SPACC_CMDX_MAX); - for (x = 0; x < cmd_max; x++) { - if (temp & SPACC_IRQ_STAT_CMDX(x)) { - spacc->config.fifo_cnt = 1; - /* disable CMD0 interrupt since STAT=1 */ - spacc_irq_cmdx_disable(spacc, x); - spacc_irq_stat_enable(spacc, - spacc->config.fifo_cnt); - - SPACC_IRQ_STAT_CLEAR_CMDX(spacc, x); - /* run registered callback */ - if (spacc->irq_cb_cmdx) - spacc->irq_cb_cmdx(spacc, x); - } - } - } - - spin_unlock_irqrestore(&spacc->lock, lock_flag); - - return temp; -} - -void spacc_set_wd_count(struct spacc_device *spacc, uint32_t val) -{ - writel(val, spacc->regmap + SPACC_REG_STAT_WD_CTRL); -} - -/* cmdx and cmdx_cnt depend on HW config - * cmdx can be 0, 1 or 2 - * cmdx_cnt must be 2^6 or less - */ -void spacc_irq_cmdx_enable(struct spacc_device *spacc, int cmdx, int cmdx_cnt) -{ - u32 temp; - - /* read the reg, clear the bit range and set the new value */ - temp = readl(spacc->regmap + SPACC_REG_IRQ_CTRL) & - (~SPACC_IRQ_CTRL_CMDX_CNT_MASK(cmdx)); - temp |= SPACC_IRQ_CTRL_CMDX_CNT_SET(cmdx, cmdx_cnt); - - writel(temp | SPACC_IRQ_CTRL_CMDX_CNT_SET(cmdx, cmdx_cnt), - spacc->regmap + SPACC_REG_IRQ_CTRL); - - writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_CMD(cmdx), - spacc->regmap + SPACC_REG_IRQ_EN); -} - -void spacc_irq_cmdx_disable(struct spacc_device *spacc, int cmdx) -{ - writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) & - (~SPACC_IRQ_EN_CMD(cmdx)), spacc->regmap + SPACC_REG_IRQ_EN); -} - -void spacc_irq_stat_enable(struct spacc_device *spacc, int stat_cnt) -{ - u32 temp; - - temp = readl(spacc->regmap + SPACC_REG_IRQ_CTRL); - if (spacc->config.is_qos) { - temp &= (~SPACC_IRQ_CTRL_STAT_CNT_MASK_QOS); - temp |= SPACC_IRQ_CTRL_STAT_CNT_SET_QOS(stat_cnt); - } else { - temp &= (~SPACC_IRQ_CTRL_STAT_CNT_MASK); - temp |= SPACC_IRQ_CTRL_STAT_CNT_SET(stat_cnt); - } - - writel(temp, spacc->regmap + SPACC_REG_IRQ_CTRL); - writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_STAT, - spacc->regmap + SPACC_REG_IRQ_EN); -} - 
-void spacc_irq_stat_disable(struct spacc_device *spacc) -{ - writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) & (~SPACC_IRQ_EN_STAT), - spacc->regmap + SPACC_REG_IRQ_EN); -} - -void spacc_irq_stat_wd_enable(struct spacc_device *spacc) -{ - writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_STAT_WD, - spacc->regmap + SPACC_REG_IRQ_EN); -} - -void spacc_irq_stat_wd_disable(struct spacc_device *spacc) -{ - writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) & - (~SPACC_IRQ_EN_STAT_WD), spacc->regmap + SPACC_REG_IRQ_EN); -} - -void spacc_irq_glbl_enable(struct spacc_device *spacc) -{ - writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_GLBL, - spacc->regmap + SPACC_REG_IRQ_EN); -} - -void spacc_irq_glbl_disable(struct spacc_device *spacc) -{ - writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) & (~SPACC_IRQ_EN_GLBL), - spacc->regmap + SPACC_REG_IRQ_EN); -} - -void spacc_disable_int (struct spacc_device *spacc) -{ - writel(0, spacc->regmap + SPACC_REG_IRQ_EN); -} - -/* a function to run callbacks in the IRQ handler */ -irqreturn_t spacc_irq_handler(int irq, void *dev) -{ - struct spacc_priv *priv = platform_get_drvdata(to_platform_device(dev)); - struct spacc_device *spacc = &priv->spacc; - - if (spacc->config.oldtimer != spacc->config.timer) { - priv->spacc.config.wd_timer = spacc->config.timer; - spacc_set_wd_count(&priv->spacc, priv->spacc.config.wd_timer); - spacc->config.oldtimer = spacc->config.timer; - } - - /* check irq flags and process as required */ - if (!spacc_process_irq(spacc)) - return IRQ_NONE; - - return IRQ_HANDLED; -} diff --git a/drivers/crypto/dwc-spacc/spacc_manager.c b/drivers/crypto/dwc-spacc/spacc_manager.c deleted file mode 100644 index d42ae499e959..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_manager.c +++ /dev/null @@ -1,653 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include "spacc_core.h" - -#ifndef MIN -#define MIN(x, y) (((x) < (y)) ? 
(x) : (y)) -#endif - -/* prevent reading past the end of the buffer */ -static void read_from_buf(unsigned char *dst, unsigned char *src, - int off, int n, int max) -{ - if (!dst) - return; - - while (off < max && n) { - *dst++ = src[off++]; - --n; - } -} - -static void write_to_buf(unsigned char *dst, const unsigned char *src, - int off, int n, int len) -{ - if (!src) - return; - - while (n && (off < len)) { - dst[off++] = *src++; - --n; - } -} - -/* This function is not meant to be called directly, - * it should be called from the job manager - */ -static int spacc_ctx_request(struct spacc_device *spacc, - int ctx_id, int ncontig) -{ - int ret; - int x, y, count; - unsigned long lock_flag; - - if (!spacc) - return -1; - - if (ctx_id > spacc->config.num_ctx) - return -1; - - if (ncontig < 1 || ncontig > spacc->config.num_ctx) - return -1; - - ret = CRYPTO_OK; - - spin_lock_irqsave(&spacc->ctx_lock, lock_flag); - /* allocating scheme, look for contiguous contexts. Free contexts have - * a ref_cnt of 0. 
- * If specific ctx_id is requested, - * test the ncontig and then bump the ref_cnt - */ - if (ctx_id != -1) { - if ((&spacc->ctx[ctx_id])->ncontig != ncontig - 1) - ret = -1; - } else { - /* check to see if ncontig are free - * loop over all available contexts to find the first - * ncontig empty ones - */ - for (x = 0; x <= (spacc->config.num_ctx - ncontig); ) { - count = ncontig; - while (count) { - if ((&spacc->ctx[x + count - 1])->ref_cnt != 0) { - /* incr x to past failed count - * location - */ - x = x + count; - break; - } - count--; - } - if (count != 0) { - ret = -1; - /* test next x */ - } else { - ctx_id = x; - ret = CRYPTO_OK; - break; - } - } - } - - if (ret == CRYPTO_OK) { - /* ctx_id is good so mark used */ - for (y = 0; y < ncontig; y++) - (&spacc->ctx[ctx_id + y])->ref_cnt++; - (&spacc->ctx[ctx_id])->ncontig = ncontig - 1; - } else { - ctx_id = -1; - } - - spin_unlock_irqrestore(&spacc->ctx_lock, lock_flag); - - return ctx_id; -} - -static int spacc_ctx_release(struct spacc_device *spacc, int ctx_id) -{ - int y; - int ncontig; - unsigned long lock_flag; - - if (ctx_id < 0 || ctx_id > spacc->config.num_ctx) - return -EINVAL; - - spin_lock_irqsave(&spacc->ctx_lock, lock_flag); - /* release the base context and contiguous block */ - ncontig = (&spacc->ctx[ctx_id])->ncontig; - for (y = 0; y <= ncontig; y++) { - if ((&spacc->ctx[ctx_id + y])->ref_cnt > 0) - (&spacc->ctx[ctx_id + y])->ref_cnt--; - } - - if ((&spacc->ctx[ctx_id])->ref_cnt == 0) { - (&spacc->ctx[ctx_id])->ncontig = 0; -#ifdef CONFIG_CRYPTO_DEV_SPACC_SECURE_MODE - /* TODO: This driver works in harmony with "normal" kernel - * processes so we release the context all the time - * normally this would be done from a "secure" kernel process - * (trustzone/etc). This hack is so that SPACC.0 - * cores can both use the same context space. 
- */ - writel(ctx_id, spacc->regmap + SPACC_REG_SECURE_RELEASE); -#endif - } - - spin_unlock_irqrestore(&spacc->ctx_lock, lock_flag); - - return CRYPTO_OK; -} - -/* Job manager: This will reset all job data, pointers, etc */ -void spacc_job_init_all(struct spacc_device *spacc) -{ - int x; - struct spacc_job *job; - - for (x = 0; x < (SPACC_MAX_JOBS); x++) { - job = &spacc->job[x]; - memset(job, 0, sizeof(struct spacc_job)); - - job->job_swid = SPACC_JOB_IDX_UNUSED; - job->job_used = SPACC_JOB_IDX_UNUSED; - spacc->job_lookup[x] = SPACC_JOB_IDX_UNUSED; - } -} - -/* get a new job id and use a specific ctx_idx or -1 for a new one */ -int spacc_job_request(struct spacc_device *spacc, int ctx_idx) -{ - int x, ret; - struct spacc_job *job; - unsigned long lock_flag; - - if (!spacc) - return -1; - - spin_lock_irqsave(&spacc->lock, lock_flag); - - /* find the first available job id */ - for (x = 0; x < SPACC_MAX_JOBS; x++) { - job = &spacc->job[x]; - if (job->job_used == SPACC_JOB_IDX_UNUSED) { - job->job_used = x; - break; - } - } - - if (x == SPACC_MAX_JOBS) { - ret = -1; - } else { - /* associate a single context to go with job */ - ret = spacc_ctx_request(spacc, ctx_idx, 1); - if (ret != -1) { - job->ctx_idx = ret; - ret = x; - } - } - - spin_unlock_irqrestore(&spacc->lock, lock_flag); - - return ret; -} - -int spacc_job_release(struct spacc_device *spacc, int job_idx) -{ - int ret; - struct spacc_job *job; - unsigned long lock_flag; - - if (!spacc) - return -EINVAL; - - if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) - return -ENXIO; - - spin_lock_irqsave(&spacc->lock, lock_flag); - - job = &spacc->job[job_idx]; - /* release context that goes with job */ - ret = spacc_ctx_release(spacc, job->ctx_idx); - job->ctx_idx = SPACC_CTX_IDX_UNUSED; - job->job_used = SPACC_JOB_IDX_UNUSED; - /* disable any callback*/ - job->cb = NULL; - - /* NOTE: this leaves ctrl data in memory */ - spin_unlock_irqrestore(&spacc->lock, lock_flag); - - return ret; -} - -int 
spacc_handle_release(struct spacc_device *spacc, int job_idx) -{ - int ret = 0; - struct spacc_job *job; - unsigned long lock_flag; - - if (!spacc) - return -EINVAL; - - if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) - return -ENXIO; - - spin_lock_irqsave(&spacc->lock, lock_flag); - - job = &spacc->job[job_idx]; - job->job_used = SPACC_JOB_IDX_UNUSED; - job->cb = NULL; /* disable any callback*/ - - /* NOTE: this leaves ctrl data in memory */ - spin_unlock_irqrestore(&spacc->lock, lock_flag); - - return ret; -} - -/* Return a context structure for a job idx or null if invalid */ -struct spacc_ctx *context_lookup_by_job(struct spacc_device *spacc, int job_idx) -{ - if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS) - return NULL; - - return &spacc->ctx[(&spacc->job[job_idx])->ctx_idx]; -} - -int spacc_process_jb(struct spacc_device *spacc) -{ - int tail, ret; - - /* are there jobs in the buffer? */ - while (spacc->jb_head != spacc->jb_tail) { - tail = spacc->jb_tail; - - if (spacc->job_buffer[tail].active) { - ret = spacc_packet_enqueue_ddt_ex(spacc, 0, - spacc->job_buffer[tail].job_idx, - spacc->job_buffer[tail].src, - spacc->job_buffer[tail].dst, - spacc->job_buffer[tail].proc_sz, - spacc->job_buffer[tail].aad_offset, - spacc->job_buffer[tail].pre_aad_sz, - spacc->job_buffer[tail].post_aad_sz, - spacc->job_buffer[tail].iv_offset, - spacc->job_buffer[tail].prio); - - if (ret != -EBUSY) - spacc->job_buffer[tail].active = 0; - else - return -1; - } - - tail++; - if (tail == SPACC_MAX_JOB_BUFFERS) - tail = 0; - - spacc->jb_tail = tail; - } - - return 0; -} - -/* Write appropriate context data which depends on operation and mode */ -int spacc_write_context(struct spacc_device *spacc, int job_idx, int op, - const unsigned char *key, int ksz, - const unsigned char *iv, int ivsz) -{ - int buflen; - int ret = CRYPTO_OK; - unsigned char buf[300]; - struct spacc_ctx *ctx = NULL; - struct spacc_job *job = NULL; - - if (job_idx < 0 || job_idx > SPACC_MAX_JOBS) - return -ENXIO; - - 
job = &spacc->job[job_idx]; - ctx = context_lookup_by_job(spacc, job_idx); - - if (!job || !ctx) - return -EIO; - - switch (op) { - case SPACC_CRYPTO_OPERATION: - /* get page size and then read so we can do a - * read-modify-write cycle - */ - buflen = MIN(sizeof(buf), - (unsigned int)spacc->config.ciph_page_size); - - pdu_from_dev_s(buf, ctx->ciph_key, buflen >> 2, - spacc->config.spacc_endian); - - switch (job->enc_mode) { - case CRYPTO_MODE_SM4_ECB: - case CRYPTO_MODE_SM4_CBC: - case CRYPTO_MODE_SM4_CFB: - case CRYPTO_MODE_SM4_OFB: - case CRYPTO_MODE_SM4_CTR: - case CRYPTO_MODE_SM4_CCM: - case CRYPTO_MODE_SM4_GCM: - case CRYPTO_MODE_SM4_CS1: - case CRYPTO_MODE_SM4_CS2: - case CRYPTO_MODE_SM4_CS3: - case CRYPTO_MODE_AES_ECB: - case CRYPTO_MODE_AES_CBC: - case CRYPTO_MODE_AES_CS1: - case CRYPTO_MODE_AES_CS2: - case CRYPTO_MODE_AES_CS3: - case CRYPTO_MODE_AES_CFB: - case CRYPTO_MODE_AES_OFB: - case CRYPTO_MODE_AES_CTR: - case CRYPTO_MODE_AES_CCM: - case CRYPTO_MODE_AES_GCM: - write_to_buf(buf, key, 0, ksz, buflen); - if (iv) { - unsigned char one[4] = { 0, 0, 0, 1 }; - unsigned long enc1, enc2; - - enc1 = CRYPTO_MODE_AES_GCM; - enc2 = CRYPTO_MODE_SM4_GCM; - - write_to_buf(buf, iv, 32, ivsz, buflen); - if (ivsz == 12 && - (job->enc_mode == enc1 || - job->enc_mode == enc2)) - write_to_buf(buf, one, 11 * 4, 4, - buflen); - } - break; - case CRYPTO_MODE_SM4_F8: - case CRYPTO_MODE_AES_F8: - if (key) { - write_to_buf(buf, key + ksz, 0, ksz, buflen); - write_to_buf(buf, key, 48, ksz, buflen); - } - write_to_buf(buf, iv, 32, 16, buflen); - break; - case CRYPTO_MODE_SM4_XTS: - case CRYPTO_MODE_AES_XTS: - if (key) { - write_to_buf(buf, key, 0, - ksz >> 1, buflen); - write_to_buf(buf, key + (ksz >> 1), 48, - ksz >> 1, buflen); - /* divide by two since that's - * what we program the hardware - */ - ksz = ksz >> 1; - } - write_to_buf(buf, iv, 32, 16, buflen); - break; - case CRYPTO_MODE_MULTI2_ECB: - case CRYPTO_MODE_MULTI2_CBC: - case CRYPTO_MODE_MULTI2_OFB: - case 
CRYPTO_MODE_MULTI2_CFB: - write_to_buf(buf, key, 0, ksz, buflen); - write_to_buf(buf, iv, 0x28, ivsz, buflen); - if (ivsz <= 8) { - /*default to 128 rounds*/ - unsigned char rounds[4] = { 0, 0, 0, 128}; - - write_to_buf(buf, rounds, 0x30, 4, buflen); - } - break; - case CRYPTO_MODE_3DES_CBC: - case CRYPTO_MODE_3DES_ECB: - case CRYPTO_MODE_DES_CBC: - case CRYPTO_MODE_DES_ECB: - write_to_buf(buf, iv, 0, 8, buflen); - write_to_buf(buf, key, 8, ksz, buflen); - break; - case CRYPTO_MODE_KASUMI_ECB: - case CRYPTO_MODE_KASUMI_F8: - write_to_buf(buf, iv, 16, 8, buflen); - write_to_buf(buf, key, 0, 16, buflen); - break; - case CRYPTO_MODE_SNOW3G_UEA2: - case CRYPTO_MODE_ZUC_UEA3: - write_to_buf(buf, key, 0, 32, buflen); - break; - case CRYPTO_MODE_CHACHA20_STREAM: - case CRYPTO_MODE_CHACHA20_POLY1305: - write_to_buf(buf, key, 0, ksz, buflen); - write_to_buf(buf, iv, 32, ivsz, buflen); - break; - case CRYPTO_MODE_NULL: - break; - } - - if (key) { - job->ckey_sz = SPACC_SET_CIPHER_KEY_SZ(ksz); - job->first_use = 1; - } - pdu_to_dev_s(ctx->ciph_key, buf, buflen >> 2, - spacc->config.spacc_endian); - break; - - case SPACC_HASH_OPERATION: - /* get page size and then read so we can do a - * read-modify-write cycle - */ - buflen = MIN(sizeof(buf), - (u32)spacc->config.hash_page_size); - pdu_from_dev_s(buf, ctx->hash_key, buflen >> 2, - spacc->config.spacc_endian); - - switch (job->hash_mode) { - case CRYPTO_MODE_MAC_XCBC: - case CRYPTO_MODE_MAC_SM4_XCBC: - if (key) { - write_to_buf(buf, key + (ksz - 32), 32, 32, - buflen); - write_to_buf(buf, key, 0, (ksz - 32), - buflen); - job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz - 32); - } - break; - case CRYPTO_MODE_HASH_CRC32: - case CRYPTO_MODE_MAC_SNOW3G_UIA2: - case CRYPTO_MODE_MAC_ZUC_UIA3: - if (key) { - write_to_buf(buf, key, 0, ksz, buflen); - job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz); - } - break; - case CRYPTO_MODE_MAC_POLY1305: - write_to_buf(buf, key, 0, ksz, buflen); - write_to_buf(buf, iv, 32, ivsz, buflen); - break; - case 
CRYPTO_MODE_HASH_CSHAKE128: - case CRYPTO_MODE_HASH_CSHAKE256: - /* use "iv" and "key" to */ - /* pass s-string and n-string */ - write_to_buf(buf, iv, 0, ivsz, buflen); - write_to_buf(buf, key, - spacc->config.string_size, ksz, buflen); - break; - case CRYPTO_MODE_MAC_KMAC128: - case CRYPTO_MODE_MAC_KMAC256: - case CRYPTO_MODE_MAC_KMACXOF128: - case CRYPTO_MODE_MAC_KMACXOF256: - /* use "iv" and "key" to pass s-string & key */ - write_to_buf(buf, iv, 0, ivsz, buflen); - write_to_buf(buf, key, - spacc->config.string_size, ksz, buflen); - job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz); - break; - default: - if (key) { - job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz); - write_to_buf(buf, key, 0, ksz, buflen); - } - } - pdu_to_dev_s(ctx->hash_key, buf, buflen >> 2, - spacc->config.spacc_endian); - break; - default: - ret = -EINVAL; - } - - return ret; -} - -int spacc_read_context(struct spacc_device *spacc, int job_idx, - int op, unsigned char *key, int ksz, - unsigned char *iv, int ivsz) -{ - int buflen; - int ret = CRYPTO_OK; - unsigned char buf[300]; - struct spacc_ctx *ctx = NULL; - struct spacc_job *job = NULL; - - if (job_idx < 0 || job_idx > SPACC_MAX_JOBS) - return -ENXIO; - - job = &spacc->job[job_idx]; - ctx = context_lookup_by_job(spacc, job_idx); - - if (!ctx) - return -EIO; - - switch (op) { - case SPACC_CRYPTO_OPERATION: - buflen = MIN(sizeof(buf), - (u32)spacc->config.ciph_page_size); - pdu_from_dev_s(buf, ctx->ciph_key, buflen >> 2, - spacc->config.spacc_endian); - - switch (job->enc_mode) { - case CRYPTO_MODE_SM4_ECB: - case CRYPTO_MODE_SM4_CBC: - case CRYPTO_MODE_SM4_CFB: - case CRYPTO_MODE_SM4_OFB: - case CRYPTO_MODE_SM4_CTR: - case CRYPTO_MODE_SM4_CCM: - case CRYPTO_MODE_SM4_GCM: - case CRYPTO_MODE_SM4_CS1: - case CRYPTO_MODE_SM4_CS2: - case CRYPTO_MODE_SM4_CS3: - case CRYPTO_MODE_AES_ECB: - case CRYPTO_MODE_AES_CBC: - case CRYPTO_MODE_AES_CS1: - case CRYPTO_MODE_AES_CS2: - case CRYPTO_MODE_AES_CS3: - case CRYPTO_MODE_AES_CFB: - case CRYPTO_MODE_AES_OFB: - 
case CRYPTO_MODE_AES_CTR: - case CRYPTO_MODE_AES_CCM: - case CRYPTO_MODE_AES_GCM: - read_from_buf(key, buf, 0, ksz, buflen); - read_from_buf(iv, buf, 32, 16, buflen); - break; - case CRYPTO_MODE_CHACHA20_STREAM: - read_from_buf(key, buf, 0, ksz, buflen); - read_from_buf(iv, buf, 32, 16, buflen); - break; - case CRYPTO_MODE_SM4_F8: - case CRYPTO_MODE_AES_F8: - if (key) { - read_from_buf(key + ksz, buf, 0, ksz, buflen); - read_from_buf(key, buf, 48, ksz, buflen); - } - read_from_buf(iv, buf, 32, 16, buflen); - break; - case CRYPTO_MODE_SM4_XTS: - case CRYPTO_MODE_AES_XTS: - if (key) { - read_from_buf(key, buf, 0, ksz >> 1, buflen); - read_from_buf(key + (ksz >> 1), buf, - 48, ksz >> 1, buflen); - } - read_from_buf(iv, buf, 32, 16, buflen); - break; - case CRYPTO_MODE_MULTI2_ECB: - case CRYPTO_MODE_MULTI2_CBC: - case CRYPTO_MODE_MULTI2_OFB: - case CRYPTO_MODE_MULTI2_CFB: - read_from_buf(key, buf, 0, ksz, buflen); - /* Number of rounds at the end of the IV */ - read_from_buf(iv, buf, 0x28, ivsz, buflen); - break; - case CRYPTO_MODE_3DES_CBC: - case CRYPTO_MODE_3DES_ECB: - read_from_buf(iv, buf, 0, 8, buflen); - read_from_buf(key, buf, 8, 24, buflen); - break; - case CRYPTO_MODE_DES_CBC: - case CRYPTO_MODE_DES_ECB: - read_from_buf(iv, buf, 0, 8, buflen); - read_from_buf(key, buf, 8, 8, buflen); - break; - case CRYPTO_MODE_KASUMI_ECB: - case CRYPTO_MODE_KASUMI_F8: - read_from_buf(iv, buf, 16, 8, buflen); - read_from_buf(key, buf, 0, 16, buflen); - break; - case CRYPTO_MODE_SNOW3G_UEA2: - case CRYPTO_MODE_ZUC_UEA3: - read_from_buf(key, buf, 0, 32, buflen); - break; - case CRYPTO_MODE_NULL: - break; - } - break; - - case SPACC_HASH_OPERATION: - buflen = MIN(sizeof(buf), - (u32)spacc->config.hash_page_size); - pdu_from_dev_s(buf, ctx->hash_key, buflen >> 2, - spacc->config.spacc_endian); - - switch (job->hash_mode) { - case CRYPTO_MODE_MAC_XCBC: - case CRYPTO_MODE_MAC_SM4_XCBC: - if (key && ksz <= 64) { - read_from_buf(key + (ksz - 32), buf, - 32, 32, buflen); - 
read_from_buf(key, buf, 0, ksz - 32, buflen); - } - break; - case CRYPTO_MODE_HASH_CRC32: - read_from_buf(iv, buf, 0, ivsz, buflen); - break; - case CRYPTO_MODE_MAC_SNOW3G_UIA2: - case CRYPTO_MODE_MAC_ZUC_UIA3: - read_from_buf(key, buf, 0, 32, buflen); - break; - default: - read_from_buf(key, buf, 0, ksz, buflen); - } - break; - default: - ret = -EINVAL; - } - - return ret; -} - -/* Context manager: This will reset all reference counts, pointers, etc */ -void spacc_ctx_init_all(struct spacc_device *spacc) -{ - int x; - struct spacc_ctx *ctx; - unsigned long lock_flag; - - spin_lock_irqsave(&spacc->ctx_lock, lock_flag); - - /* initialize contexts */ - for (x = 0; x < spacc->config.num_ctx; x++) { - ctx = &spacc->ctx[x]; - - /* sets everything including ref_cnt and ncontig to 0 */ - memset(ctx, 0, sizeof(*ctx)); - - ctx->ciph_key = spacc->regmap + SPACC_CTX_CIPH_KEY + - (x * spacc->config.ciph_page_size); - ctx->hash_key = spacc->regmap + SPACC_CTX_HASH_KEY + - (x * spacc->config.hash_page_size); - } - - spin_unlock_irqrestore(&spacc->ctx_lock, lock_flag); -} diff --git a/drivers/crypto/dwc-spacc/spacc_skcipher.c b/drivers/crypto/dwc-spacc/spacc_skcipher.c deleted file mode 100644 index 1ef7c665188f..000000000000 --- a/drivers/crypto/dwc-spacc/spacc_skcipher.c +++ /dev/null @@ -1,717 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include -#include -#include -#include - -#include "spacc_device.h" -#include "spacc_core.h" - -static LIST_HEAD(spacc_cipher_alg_list); -static DEFINE_MUTEX(spacc_cipher_alg_mutex); - -static struct mode_tab possible_ciphers[] = { - /* {keylen, MODE_TAB_CIPH(name, id, iv_len, blk_len)} */ - - /* SM4 */ - { MODE_TAB_CIPH("cbc(sm4)", SM4_CBC, 16, 16), .keylen[0] = 16, - .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 }, - { MODE_TAB_CIPH("ecb(sm4)", SM4_ECB, 0, 16), .keylen[0] = 16, - .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 }, - { MODE_TAB_CIPH("ctr(sm4)", SM4_CTR, 16, 
1), .keylen[0] = 16, - .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 }, - { MODE_TAB_CIPH("xts(sm4)", SM4_XTS, 16, 16), .keylen[0] = 32, - .chunksize = 16, .walksize = 16, .min_keysize = 32, .max_keysize = 32 }, - { MODE_TAB_CIPH("cts(cbc(sm4))", SM4_CS3, 16, 16), .keylen[0] = 16, - .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 }, - - /* AES */ - { MODE_TAB_CIPH("cbc(aes)", AES_CBC, 16, 16), .keylen = { 16, 24, 32 }, - .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 32 }, - { MODE_TAB_CIPH("ecb(aes)", AES_ECB, 0, 16), .keylen = { 16, 24, 32 }, - .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 32 }, - { MODE_TAB_CIPH("xts(aes)", AES_XTS, 16, 16), .keylen = { 32, 48, 64 }, - .chunksize = 16, .walksize = 16, .min_keysize = 32, .max_keysize = 64 }, - { MODE_TAB_CIPH("cts(cbc(aes))", AES_CS3, 16, 16), - .keylen = { 16, 24, 32 }, .chunksize = 16, .walksize = 16, - .min_keysize = 16, .max_keysize = 32 }, - { MODE_TAB_CIPH("ctr(aes)", AES_CTR, 16, 1), .keylen = { 16, 24, 32 }, - .chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 32 }, - - /* CHACHA20 */ - { MODE_TAB_CIPH("chacha20", CHACHA20_STREAM, 16, 1), .keylen[0] = 32, - .chunksize = 64, .walksize = 64, .min_keysize = 32, .max_keysize = 32 }, - - /* DES */ - { MODE_TAB_CIPH("ecb(des)", DES_ECB, 0, 8), .keylen[0] = 8, - .chunksize = 8, .walksize = 8, .min_keysize = 8, .max_keysize = 8}, - { MODE_TAB_CIPH("cbc(des)", DES_CBC, 8, 8), .keylen[0] = 8, - .chunksize = 8, .walksize = 8, .min_keysize = 8, .max_keysize = 8}, - { MODE_TAB_CIPH("ecb(des3_ede)", 3DES_ECB, 0, 8), .keylen[0] = 24, - .chunksize = 8, .walksize = 8, .min_keysize = 24, .max_keysize = 24 }, - { MODE_TAB_CIPH("cbc(des3_ede)", 3DES_CBC, 8, 8), .keylen[0] = 24, - .chunksize = 8, .walksize = 8, .min_keysize = 24, .max_keysize = 24 }, -}; - -static int spacc_skcipher_fallback(unsigned char *name, - struct skcipher_request *req, int enc_dec) -{ - int ret = 0; 
- struct crypto_skcipher *reqtfm = crypto_skcipher_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_skcipher_ctx(reqtfm); - struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); - - tctx->fb.cipher = crypto_alloc_skcipher(name, - CRYPTO_ALG_TYPE_SKCIPHER, - CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(tctx->fb.cipher)) - return PTR_ERR(tctx->fb.cipher); - - crypto_skcipher_set_reqsize(reqtfm, - sizeof(struct spacc_crypto_reqctx) + - crypto_skcipher_reqsize(tctx->fb.cipher)); - ret = crypto_skcipher_setkey(tctx->fb.cipher, tctx->cipher_key, - tctx->key_len); - if (ret) - return ret; - - skcipher_request_set_tfm(&ctx->fb.cipher_req, tctx->fb.cipher); - skcipher_request_set_crypt(&ctx->fb.cipher_req, req->src, req->dst, - req->cryptlen, req->iv); - - if (enc_dec) - ret = crypto_skcipher_decrypt(&ctx->fb.cipher_req); - else - ret = crypto_skcipher_encrypt(&ctx->fb.cipher_req); - - crypto_free_skcipher(tctx->fb.cipher); - tctx->fb.cipher = NULL; - - kfree(tctx->cipher_key); - tctx->cipher_key = NULL; - - return ret; -} - -static void spacc_cipher_cleanup_dma(struct device *dev, - struct skcipher_request *req) -{ - struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); - struct spacc_crypto_ctx *tctx = ctx->ccb.tctx; - - if (req->dst != req->src) { - if (ctx->src_nents) { - dma_unmap_sg(dev, req->src, ctx->src_nents, - DMA_TO_DEVICE); - pdu_ddt_free(&ctx->src); - } - - if (ctx->dst_nents) { - dma_unmap_sg(dev, req->dst, ctx->dst_nents, - DMA_FROM_DEVICE); - pdu_ddt_free(&ctx->dst); - } - } else { - if (ctx->src_nents) { - dma_unmap_sg(dev, req->src, ctx->src_nents, - DMA_TO_DEVICE); - pdu_ddt_free(&ctx->src); - } - } - - kfree(tctx->cipher_key); - tctx->cipher_key = NULL; -} - -static void spacc_cipher_cb(void *spacc, void *tfm) -{ - int err = -1; - struct cipher_cb_data *cb = tfm; - struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(cb->req); - - u32 status_reg = readl(cb->spacc->regmap + SPACC_REG_STATUS); - u32 status_ret = (status_reg >> 24) & 0x03; 
- - if (ctx->mode == CRYPTO_MODE_DES_CBC || - ctx->mode == CRYPTO_MODE_3DES_CBC) { - spacc_read_context(cb->spacc, cb->tctx->handle, - SPACC_CRYPTO_OPERATION, NULL, 0, - cb->req->iv, 8); - } else if (ctx->mode != CRYPTO_MODE_DES_ECB && - ctx->mode != CRYPTO_MODE_3DES_ECB && - ctx->mode != CRYPTO_MODE_SM4_ECB && - ctx->mode != CRYPTO_MODE_AES_ECB && - ctx->mode != CRYPTO_MODE_SM4_XTS && - ctx->mode != CRYPTO_MODE_KASUMI_ECB) { - if (status_ret == 0x3) { - err = -EINVAL; - goto CALLBACK_ERR; - } - spacc_read_context(cb->spacc, cb->tctx->handle, - SPACC_CRYPTO_OPERATION, NULL, 0, - cb->req->iv, 16); - } - - if (ctx->mode != CRYPTO_MODE_DES_ECB && - ctx->mode != CRYPTO_MODE_DES_CBC && - ctx->mode != CRYPTO_MODE_3DES_ECB && - ctx->mode != CRYPTO_MODE_3DES_CBC) { - if (status_ret == 0x03) { - err = -EINVAL; - goto CALLBACK_ERR; - } - } - - if (ctx->mode == CRYPTO_MODE_SM4_ECB && status_ret == 0x03) { - err = -EINVAL; - goto CALLBACK_ERR; - } - - if (cb->req->dst != cb->req->src) - dma_sync_sg_for_cpu(cb->tctx->dev, cb->req->dst, ctx->dst_nents, - DMA_FROM_DEVICE); - - err = cb->spacc->job[cb->new_handle].job_err; - -CALLBACK_ERR: - spacc_cipher_cleanup_dma(cb->tctx->dev, cb->req); - spacc_close(cb->spacc, cb->new_handle); - skcipher_request_complete(cb->req, err); -} - -static int spacc_cipher_init_dma(struct device *dev, - struct skcipher_request *req) -{ - struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); - int rc; - - if (req->src == req->dst) { - rc = spacc_sg_to_ddt(dev, req->src, req->cryptlen, &ctx->src, - DMA_TO_DEVICE); - if (rc < 0) { - pdu_ddt_free(&ctx->src); - return rc; - } - ctx->src_nents = rc; - } else { - rc = spacc_sg_to_ddt(dev, req->src, req->cryptlen, &ctx->src, - DMA_TO_DEVICE); - if (rc < 0) { - pdu_ddt_free(&ctx->src); - return rc; - } - ctx->src_nents = rc; - - rc = spacc_sg_to_ddt(dev, req->dst, req->cryptlen, &ctx->dst, - DMA_FROM_DEVICE); - if (rc < 0) { - pdu_ddt_free(&ctx->dst); - return rc; - } - ctx->dst_nents = rc; - } - - 
return 0; -} - -static int spacc_cipher_cra_init(struct crypto_tfm *tfm) -{ - struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm); - const struct spacc_alg *salg = spacc_tfm_skcipher(tfm); - - tctx->keylen = 0; - tctx->cipher_key = NULL; - tctx->handle = -1; - tctx->ctx_valid = false; - tctx->dev = get_device(salg->dev[0]); - - crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), - sizeof(struct spacc_crypto_reqctx)); - - return 0; -} - -static void spacc_cipher_cra_exit(struct crypto_tfm *tfm) -{ - struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm); - struct spacc_priv *priv = dev_get_drvdata(tctx->dev); - - - if (tctx->handle >= 0) - spacc_close(&priv->spacc, tctx->handle); - - put_device(tctx->dev); -} - - -static int spacc_check_keylen(const struct spacc_alg *salg, unsigned int keylen) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(salg->mode->keylen); i++) - if (salg->mode->keylen[i] == keylen) - return 0; - - return -EINVAL; -} - -static int spacc_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - int ret = 0, rc = 0, err; - const struct spacc_alg *salg = spacc_tfm_skcipher(&tfm->base); - struct spacc_crypto_ctx *tctx = crypto_skcipher_ctx(tfm); - struct spacc_priv *priv = dev_get_drvdata(tctx->dev); - struct spacc_crypto_reqctx *ctx = crypto_skcipher_ctx(tfm); - - err = spacc_check_keylen(salg, keylen); - if (err) - return err; - - ctx->mode = salg->mode->id; - tctx->key_len = keylen; - tctx->cipher_key = kmalloc(keylen, GFP_KERNEL); - memcpy(tctx->cipher_key, key, keylen); - - if (tctx->handle >= 0) { - spacc_close(&priv->spacc, tctx->handle); - put_device(tctx->dev); - tctx->handle = -1; - tctx->dev = NULL; - } - - priv = NULL; - priv = dev_get_drvdata(salg->dev[0]); - tctx->dev = get_device(salg->dev[0]); - ret = spacc_isenabled(&priv->spacc, salg->mode->id, - keylen); - if (ret) - tctx->handle = spacc_open(&priv->spacc, salg->mode->id, - CRYPTO_MODE_NULL, -1, 0, - spacc_cipher_cb, tfm); - - if (tctx->handle < 
0) { - put_device(salg->dev[0]); - dev_dbg(salg->dev[0], "failed to open SPAcc context\n"); - return -EINVAL; - } - - /* Weak key Implementation for DES_ECB */ - if (salg->mode->id == CRYPTO_MODE_DES_ECB) { - err = verify_skcipher_des_key(tfm, key); - if (err) - return -EINVAL; - } - - if (salg->mode->id == CRYPTO_MODE_SM4_F8 || - salg->mode->id == CRYPTO_MODE_AES_F8) { - /* f8 mode requires an IV of 128-bits and a key-salt mask, - * equivalent in size to the key. - * AES-F8 or SM4-F8 mode has a SALTKEY prepended to the base - * key. - */ - rc = spacc_write_context(&priv->spacc, tctx->handle, - SPACC_CRYPTO_OPERATION, key, 16, - NULL, 0); - } else { - rc = spacc_write_context(&priv->spacc, tctx->handle, - SPACC_CRYPTO_OPERATION, key, keylen, - NULL, 0); - } - - if (rc < 0) { - dev_dbg(salg->dev[0], "failed with SPAcc write context\n"); - return -EINVAL; - } - - return 0; -} - -static int spacc_cipher_process(struct skcipher_request *req, int enc_dec) -{ - u8 ivc1[16]; - unsigned char *name; - unsigned int len = 0; - u32 num_iv = 0, diff; - u64 num_iv64 = 0, diff64; - unsigned char chacha20_iv[16]; - int rc = 0, ret = 0, i = 0, j = 0; - struct crypto_skcipher *reqtfm = crypto_skcipher_reqtfm(req); - struct spacc_crypto_ctx *tctx = crypto_skcipher_ctx(reqtfm); - struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); - struct spacc_priv *priv = dev_get_drvdata(tctx->dev); - const struct spacc_alg *salg = spacc_tfm_skcipher(&reqtfm->base); - struct spacc_device *device_h = &priv->spacc; - - len = ctx->spacc_cipher_cryptlen / 16; - - if (req->cryptlen == 0) { - if (salg->mode->id == CRYPTO_MODE_SM4_CS3 || - salg->mode->id == CRYPTO_MODE_SM4_XTS || - salg->mode->id == CRYPTO_MODE_AES_XTS || - salg->mode->id == CRYPTO_MODE_AES_CS3) - return -EINVAL; - else - return 0; - } - - /* Given IV - <1st 4-bytes as counter value> - * - * Reversing the order of nonce & counter as, - * <1st 12-bytes as nonce> - * - * and then write to HW context, - * ex: - * Given IV - 
2a000000000000000000000000000002 - * Reverse order - 0000000000000000000000020000002a - */ - if (salg->mode->id == CRYPTO_MODE_CHACHA20_STREAM) { - for (i = 4; i < 16; i++) { - chacha20_iv[j] = req->iv[i]; - j++; - } - - j = j + 3; - - for (i = 0; i <= 3; i++) { - chacha20_iv[j] = req->iv[i]; - j--; - } - memcpy(req->iv, chacha20_iv, 16); - } - - if (salg->mode->id == CRYPTO_MODE_SM4_CFB) { - if (req->cryptlen % 16 != 0) { - name = salg->calg->cra_name; - ret = spacc_skcipher_fallback(name, req, enc_dec); - return ret; - } - } - - if (salg->mode->id == CRYPTO_MODE_SM4_XTS || - salg->mode->id == CRYPTO_MODE_SM4_CS3 || - salg->mode->id == CRYPTO_MODE_AES_XTS || - salg->mode->id == CRYPTO_MODE_AES_CS3) { - if (req->cryptlen == 16) { - name = salg->calg->cra_name; - ret = spacc_skcipher_fallback(name, req, enc_dec); - return ret; - } - } - - if (salg->mode->id == CRYPTO_MODE_AES_CTR || - salg->mode->id == CRYPTO_MODE_SM4_CTR) { - /* copy the IV to local buffer */ - for (i = 0; i < 16; i++) - ivc1[i] = req->iv[i]; - - /* 64-bit counter width */ - if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) & (0x3)) { - - for (i = 8; i < 16; i++) { - num_iv64 <<= 8; - num_iv64 |= ivc1[i]; - } - - diff64 = SPACC_CTR_IV_MAX64 - num_iv64; - - if (len > diff64) { - name = salg->calg->cra_name; - ret = spacc_skcipher_fallback(name, - req, enc_dec); - return ret; - } - /* 32-bit counter width */ - } else if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) - & (0x2)) { - - for (i = 12; i < 16; i++) { - num_iv <<= 8; - num_iv |= ivc1[i]; - } - - diff = SPACC_CTR_IV_MAX32 - num_iv; - - if (len > diff) { - name = salg->calg->cra_name; - ret = spacc_skcipher_fallback(name, - req, enc_dec); - return ret; - } - /* 16-bit counter width */ - } else if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) - & (0x1)) { - - for (i = 14; i < 16; i++) { - num_iv <<= 8; - num_iv |= ivc1[i]; - } - - diff = SPACC_CTR_IV_MAX16 - num_iv; - - if (len > diff) { - name = salg->calg->cra_name; - ret = 
spacc_skcipher_fallback(name, - req, enc_dec); - return ret; - } - /* 8-bit counter width */ - } else if ((readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) - & 0x7) == 0) { - - for (i = 15; i < 16; i++) { - num_iv <<= 8; - num_iv |= ivc1[i]; - } - - diff = SPACC_CTR_IV_MAX8 - num_iv; - - if (len > diff) { - name = salg->calg->cra_name; - ret = spacc_skcipher_fallback(name, - req, enc_dec); - return ret; - } - } - } - - if (salg->mode->id == CRYPTO_MODE_DES_CBC || - salg->mode->id == CRYPTO_MODE_3DES_CBC) - rc = spacc_write_context(&priv->spacc, tctx->handle, - SPACC_CRYPTO_OPERATION, NULL, 0, - req->iv, 8); - else if (salg->mode->id != CRYPTO_MODE_DES_ECB && - salg->mode->id != CRYPTO_MODE_3DES_ECB && - salg->mode->id != CRYPTO_MODE_SM4_ECB && - salg->mode->id != CRYPTO_MODE_AES_ECB && - salg->mode->id != CRYPTO_MODE_KASUMI_ECB) - rc = spacc_write_context(&priv->spacc, tctx->handle, - SPACC_CRYPTO_OPERATION, NULL, 0, - req->iv, 16); - - if (rc < 0) - pr_err("ERR: spacc_write_context\n"); - - /* Initialize the DMA */ - rc = spacc_cipher_init_dma(tctx->dev, req); - - ctx->ccb.new_handle = spacc_clone_handle(&priv->spacc, tctx->handle, - &ctx->ccb); - if (ctx->ccb.new_handle < 0) { - spacc_cipher_cleanup_dma(tctx->dev, req); - dev_dbg(salg->dev[0], "failed to clone handle\n"); - return -EINVAL; - } - - /* copying the data to clone handle */ - ctx->ccb.tctx = tctx; - ctx->ccb.ctx = ctx; - ctx->ccb.req = req; - ctx->ccb.spacc = &priv->spacc; - - if (salg->mode->id == CRYPTO_MODE_SM4_CS3) { - int handle = ctx->ccb.new_handle; - - if (handle < 0 || handle > SPACC_MAX_JOBS) - return -ENXIO; - - device_h->job[handle].auxinfo_cs_mode = 3; - } - - if (enc_dec) { /* for decrypt */ - rc = spacc_set_operation(&priv->spacc, ctx->ccb.new_handle, 1, - ICV_IGNORE, IP_ICV_IGNORE, 0, 0, 0); - spacc_set_key_exp(&priv->spacc, ctx->ccb.new_handle); - } else { /* for encrypt */ - rc = spacc_set_operation(&priv->spacc, ctx->ccb.new_handle, 0, - ICV_IGNORE, IP_ICV_IGNORE, 0, 0, 0); - } - - 
rc = spacc_packet_enqueue_ddt(&priv->spacc, ctx->ccb.new_handle, - &ctx->src, - (req->dst == req->src) ? &ctx->src : - &ctx->dst, - req->cryptlen, - 0, 0, 0, 0, 0); - if (rc < 0) { - spacc_cipher_cleanup_dma(tctx->dev, req); - spacc_close(&priv->spacc, ctx->ccb.new_handle); - - if (rc != -EBUSY && rc < 0) { - dev_err(tctx->dev, - "failed to enqueue job, ERR: %d\n", rc); - return rc; - } else if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { - return -EBUSY; - } - } - - priv->spacc.job[tctx->handle].first_use = 0; - priv->spacc.job[tctx->handle].ctrl &= - ~(1UL << priv->spacc.config.ctrl_map[SPACC_CTRL_KEY_EXP]); - - return -EINPROGRESS; -} - -static int spacc_cipher_encrypt(struct skcipher_request *req) -{ - int rv = 0; - struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); - - ctx->spacc_cipher_cryptlen = req->cryptlen; - - /* enc_dec - 0(encrypt), 1(decrypt) */ - rv = spacc_cipher_process(req, 0); - - return rv; -} - -static int spacc_cipher_decrypt(struct skcipher_request *req) -{ - int rv = 0; - struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req); - - ctx->spacc_cipher_cryptlen = req->cryptlen; - - /* enc_dec - 0(encrypt), 1(decrypt) */ - rv = spacc_cipher_process(req, 1); - - return rv; -} - -static struct skcipher_alg spacc_skcipher_alg = { - .setkey = spacc_cipher_setkey, - .encrypt = spacc_cipher_encrypt, - .decrypt = spacc_cipher_decrypt, - /* - * Chunksize: Equal to the block size except for stream cipher - * such as CTR where it is set to the underlying block size. - * - * Walksize: Equal to the chunk size except in cases where the - * algorithm is considerably more efficient if it can operate on - * multiple chunks in parallel. Should be a multiple of chunksize. 
- */ - .min_keysize = 16, - .max_keysize = 64, - .ivsize = 16, - .chunksize = 16, - .walksize = 16, - .base = { - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_ALLOCATES_MEMORY | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = 16, - .cra_ctxsize = sizeof(struct spacc_crypto_ctx), - .cra_priority = 300, - .cra_init = spacc_cipher_cra_init, - .cra_exit = spacc_cipher_cra_exit, - .cra_module = THIS_MODULE, - }, -}; - -static void spacc_init_calg(struct crypto_alg *calg, - const struct mode_tab *mode) -{ - - strscpy(calg->cra_name, mode->name, sizeof(mode->name) - 1); - calg->cra_name[sizeof(mode->name) - 1] = '\0'; - - strscpy(calg->cra_driver_name, "spacc-"); - strcat(calg->cra_driver_name, mode->name); - calg->cra_driver_name[sizeof(calg->cra_driver_name) - 1] = '\0'; - calg->cra_blocksize = mode->blocklen; -} - -static int spacc_register_cipher(struct spacc_alg *salg, - unsigned int algo_idx) -{ - int rc; - - salg->calg = &salg->alg.skcipher.base; - salg->alg.skcipher = spacc_skcipher_alg; - - /* this function will assign mode->name to calg->cra_name & - * calg->cra_driver_name - */ - spacc_init_calg(salg->calg, salg->mode); - salg->alg.skcipher.ivsize = salg->mode->ivlen; - salg->alg.skcipher.base.cra_blocksize = salg->mode->blocklen; - - salg->alg.skcipher.chunksize = possible_ciphers[algo_idx].chunksize; - salg->alg.skcipher.walksize = possible_ciphers[algo_idx].walksize; - salg->alg.skcipher.min_keysize = possible_ciphers[algo_idx].min_keysize; - salg->alg.skcipher.max_keysize = possible_ciphers[algo_idx].max_keysize; - - rc = crypto_register_skcipher(&salg->alg.skcipher); - if (rc < 0) - return rc; - - mutex_lock(&spacc_cipher_alg_mutex); - list_add(&salg->list, &spacc_cipher_alg_list); - mutex_unlock(&spacc_cipher_alg_mutex); - - return 0; -} - -int probe_ciphers(struct platform_device *spacc_pdev) -{ - int rc; - unsigned int i, y; - int registered = 0; - struct spacc_alg *salg; - struct spacc_priv *priv = 
dev_get_drvdata(&spacc_pdev->dev); - - for (i = 0; i < ARRAY_SIZE(possible_ciphers); i++) - possible_ciphers[i].valid = 0; - - for (i = 0; i < ARRAY_SIZE(possible_ciphers) && - (possible_ciphers[i].valid == 0); i++) { - for (y = 0; y < 3; y++) { - if (spacc_isenabled(&priv->spacc, - possible_ciphers[i].id & 0xFF, - possible_ciphers[i].keylen[y])) { - salg = kmalloc(sizeof(*salg), GFP_KERNEL); - if (!salg) - return -ENOMEM; - - salg->mode = &possible_ciphers[i]; - salg->dev[0] = &spacc_pdev->dev; - - if (possible_ciphers[i].valid == 0) { - rc = spacc_register_cipher(salg, i); - if (rc < 0) { - kfree(salg); - continue; - } - } - dev_dbg(&spacc_pdev->dev, "registered %s\n", - possible_ciphers[i].name); - registered++; - possible_ciphers[i].valid = 1; - } - } - } - - return registered; -} - -int spacc_unregister_cipher_algs(void) -{ - struct spacc_alg *salg, *tmp; - - mutex_lock(&spacc_cipher_alg_mutex); - - list_for_each_entry_safe(salg, tmp, &spacc_cipher_alg_list, list) { - crypto_unregister_skcipher(&salg->alg.skcipher); - list_del(&salg->list); - kfree(salg); - } - - mutex_unlock(&spacc_cipher_alg_mutex); - - return 0; -} From 78cb66caa6ab5385ac2090f1aae5f3c19e08f522 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Mon, 26 Aug 2024 15:04:15 +0800 Subject: [PATCH 75/96] hwrng: mtk - Use devm_pm_runtime_enable Replace pm_runtime_enable with the devres-enabled version which can trigger pm_runtime_disable. Otherwise, the below appears during reload driver. mtk_rng 1020f000.rng: Unbalanced pm_runtime_enable! 
Fixes: 81d2b34508c6 ("hwrng: mtk - add runtime PM support") Cc: Suggested-by: Chen-Yu Tsai Signed-off-by: Guoqing Jiang Signed-off-by: Herbert Xu --- drivers/char/hw_random/mtk-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index aa993753ab12..1e3048f2bb38 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c @@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_enable(&pdev->dev); + devm_pm_runtime_enable(&pdev->dev); dev_info(&pdev->dev, "registered RNG driver\n"); From c299d7af9dfb9b7abfc23cb87287c4f54ade92ac Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 27 Aug 2024 11:50:01 -0700 Subject: [PATCH 76/96] crypto: x86/aesni - update docs for aesni-intel module Update the kconfig help and module description to reflect that VAES instructions are now used in some cases. Also fix XTR => XCTR. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/x86/crypto/Kconfig | 8 ++++++-- arch/x86/crypto/aesni-intel_glue.c | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index 24875e6295f2..7b1bebed879d 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -14,7 +14,7 @@ config CRYPTO_CURVE25519_X86 - ADX (large integer arithmetic) config CRYPTO_AES_NI_INTEL - tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTR, XTS, GCM (AES-NI)" + tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XCTR, XTS, GCM (AES-NI/VAES)" depends on X86 select CRYPTO_AEAD select CRYPTO_LIB_AES @@ -25,10 +25,14 @@ config CRYPTO_AES_NI_INTEL help Block cipher: AES cipher algorithms AEAD cipher: AES with GCM - Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XTR, XTS + Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XCTR, XTS Architecture: x86 (32-bit and 64-bit) using: - AES-NI (AES new instructions) + - VAES (Vector AES) + + Some algorithm implementations are supported only in 64-bit builds, + and some have additional prerequisites such as AVX2 or AVX512. 
config CRYPTO_BLOWFISH_X86_64 tristate "Ciphers: Blowfish, modes: ECB, CBC" diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index d63ba9eaba3e..b0dd83555499 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -1750,6 +1750,6 @@ static void __exit aesni_exit(void) late_initcall(aesni_init); module_exit(aesni_exit); -MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); +MODULE_DESCRIPTION("AES cipher and modes, optimized with AES-NI or VAES instructions"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("aes"); From 3401f63e72596dcb7d912a5b67b4291643cc1034 Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Thu, 29 Aug 2024 12:20:07 +0200 Subject: [PATCH 77/96] crypto: ccp - do not request interrupt on cmd completion when irqs disabled While sending a command to the PSP, we always requested an interrupt from the PSP after command completion. This worked for most cases. For the special case of irqs being disabled -- e.g. when running within crashdump or kexec contexts, we should not set the SEV_CMDRESP_IOC flag, so the PSP knows to not attempt interrupt delivery. Fixes: 8ef979584ea8 ("crypto: ccp: Add panic notifier for SEV/SNP firmware shutdown on kdump") Based-on-patch-by: Tom Lendacky Signed-off-by: Amit Shah Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index ff17b651c328..af018afd9cd7 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -910,7 +910,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) sev->int_rcvd = 0; - reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd); + + /* + * If invoked during panic handling, local interrupts are disabled so + * the PSP command completion interrupt can't be used. 
+ * sev_wait_cmd_ioc() already checks for interrupts disabled and + * polls for PSP command completion. Ensure we do not request an + * interrupt from the PSP if irqs disabled. + */ + if (!irqs_disabled()) + reg |= SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); /* wait for command completion */ From f386dc64e1a5d3dcb84579119ec350ab026fea88 Mon Sep 17 00:00:00 2001 From: Yang Shen Date: Sat, 31 Aug 2024 17:50:07 +0800 Subject: [PATCH 78/96] crypto: hisilicon - fix missed error branch If an error occurs in the process after the SGL is mapped successfully, it need to unmap the SGL. Otherwise, memory problems may occur. Signed-off-by: Yang Shen Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sgl.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 568acd0aee3f..c974f95cd126 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -225,7 +225,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, dma_addr_t curr_sgl_dma = 0; struct acc_hw_sge *curr_hw_sge; struct scatterlist *sg; - int sg_n; + int sg_n, ret; if (!dev || !sgl || !pool || !hw_sgl_dma || index >= pool->count) return ERR_PTR(-EINVAL); @@ -240,14 +240,15 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, if (sg_n_mapped > pool->sge_nr) { dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n"); - return ERR_PTR(-EINVAL); + ret = -EINVAL; + goto err_unmap; } curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); if (IS_ERR(curr_hw_sgl)) { dev_err(dev, "Get SGL error!\n"); - dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); - return ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto err_unmap; } curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr); curr_hw_sge = curr_hw_sgl->sge_entries; @@ -262,6 +263,11 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, *hw_sgl_dma = curr_sgl_dma; 
return curr_hw_sgl; + +err_unmap: + dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); + + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl); From 5fdb4b345cfb4f12ea95f6680779080e4e354100 Mon Sep 17 00:00:00 2001 From: Chenghai Huang Date: Sat, 31 Aug 2024 17:50:08 +0800 Subject: [PATCH 79/96] crypto: hisilicon - add a lock for the qp send operation Apply for a lock before the qp send operation to ensure no resource race in multi-concurrency situations. This modification has almost no impact on performance. Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 2 ++ drivers/crypto/hisilicon/zip/zip_crypto.c | 3 +++ 2 files changed, 5 insertions(+) diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 764532a6ca82..c167dbd6c7d6 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -575,7 +575,9 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg) do { atomic64_inc(&dfx[HPRE_SEND_CNT].value); + spin_lock_bh(&ctx->req_lock); ret = hisi_qp_send(ctx->qp, msg); + spin_unlock_bh(&ctx->req_lock); if (ret != -EBUSY) break; atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value); diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 92d3bd0dfe1b..7327f8f29b01 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -213,6 +213,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx, { struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; struct acomp_req *a_req = req->req; struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; @@ -244,7 +245,9 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx, /* send command to start a task */ atomic64_inc(&dfx->send_cnt); + 
spin_lock_bh(&req_q->req_lock); ret = hisi_qp_send(qp, &zip_sqe); + spin_unlock_bh(&req_q->req_lock); if (unlikely(ret < 0)) { atomic64_inc(&dfx->send_busy_cnt); ret = -EAGAIN; From f5dd7c43022799ac5c4e3a0d445f9c293a198413 Mon Sep 17 00:00:00 2001 From: Chenghai Huang Date: Sat, 31 Aug 2024 17:50:09 +0800 Subject: [PATCH 80/96] crypto: hisilicon/trng - modifying the order of header files Header files is included Order-ref: standard library headers, OS library headers, and project-specific headers. This patch modifies the order of header files according to suggestions. In addition, use %u to print unsigned int variables to prevent overflow. Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/trng/trng.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c index 451b167bcc73..66c551ecdee8 100644 --- a/drivers/crypto/hisilicon/trng/trng.c +++ b/drivers/crypto/hisilicon/trng/trng.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ +#include #include #include #include @@ -13,7 +14,6 @@ #include #include #include -#include #define HISI_TRNG_REG 0x00F0 #define HISI_TRNG_BYTES 4 @@ -121,7 +121,7 @@ static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src, u32 i; if (dlen > SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES || dlen == 0) { - pr_err("dlen(%d) exceeds limit(%d)!\n", dlen, + pr_err("dlen(%u) exceeds limit(%d)!\n", dlen, SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES); return -EINVAL; } From 5d2d1ee0874c26b8010ddf7f57e2f246e848af38 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 31 Aug 2024 19:48:29 +0800 Subject: [PATCH 81/96] crypto: hisilicon/qm - reset device before enabling it Before the device is enabled again, the device may still store the previously processed data. If an error occurs in the previous task, the device may fail to be enabled again. 
Therefore, before enabling device, reset the device to restore the initial state. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 32 +++--- drivers/crypto/hisilicon/qm.c | 114 +++++++++++++++------- drivers/crypto/hisilicon/sec2/sec_main.c | 16 ++- drivers/crypto/hisilicon/zip/zip_main.c | 23 +++-- 4 files changed, 121 insertions(+), 64 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 10aa4da93323..12d52713b95e 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -358,6 +358,8 @@ static struct dfx_diff_registers hpre_diff_regs[] = { }, }; +static const struct hisi_qm_err_ini hpre_err_ini; + bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) { u32 cap_val; @@ -1161,6 +1163,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; + qm->err_ini = &hpre_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } @@ -1350,8 +1353,6 @@ static int hpre_pf_probe_init(struct hpre *hpre) hpre_open_sva_prefetch(qm); - qm->err_ini = &hpre_err_ini; - qm->err_ini->err_info_init(qm); hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) @@ -1380,6 +1381,18 @@ static int hpre_probe_init(struct hpre *hpre) return 0; } +static void hpre_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + hpre_cnt_regs_clear(qm); + qm->debug.curr_qm_qp_num = 0; + hpre_show_last_regs_uninit(qm); + hpre_close_sva_prefetch(qm); + hisi_qm_dev_err_uninit(qm); +} + static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_qm *qm; @@ -1405,7 +1418,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = hisi_qm_start(qm); if (ret) - goto err_with_err_init; + goto err_with_probe_init; ret = hpre_debugfs_init(qm); 
if (ret) @@ -1444,9 +1457,8 @@ err_qm_del_list: hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); -err_with_err_init: - hpre_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); +err_with_probe_init: + hpre_probe_uninit(qm); err_with_qm_init: hisi_qm_uninit(qm); @@ -1468,13 +1480,7 @@ static void hpre_remove(struct pci_dev *pdev) hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); - if (qm->fun_type == QM_HW_PF) { - hpre_cnt_regs_clear(qm); - qm->debug.curr_qm_qp_num = 0; - hpre_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); - } - + hpre_probe_uninit(qm); hisi_qm_uninit(qm); } diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index f614fd228b56..b2b5f15abdf7 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -450,6 +450,7 @@ static struct qm_typical_qos_table shaper_cbs_s[] = { }; static void qm_irqs_unregister(struct hisi_qm *qm); +static int qm_reset_device(struct hisi_qm *qm); static u32 qm_get_hw_error_status(struct hisi_qm *qm) { @@ -4108,6 +4109,22 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) return 0; } +static int qm_master_ooo_check(struct hisi_qm *qm) +{ + u32 val; + int ret; + + /* Check the ooo register of the device before resetting the device. */ + writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, + val, (val == ACC_MASTER_TRANS_RETURN_RW), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) + pci_warn(qm->pdev, "Bus lock! 
Please reset system.\n"); + + return ret; +} + static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) { u32 nfe_enb = 0; @@ -4130,11 +4147,10 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) } } -static int qm_soft_reset(struct hisi_qm *qm) +static int qm_soft_reset_prepare(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; - u32 val; /* Ensure all doorbells and mailboxes received by QM */ ret = qm_check_req_recv(qm); @@ -4156,29 +4172,23 @@ static int qm_soft_reset(struct hisi_qm *qm) } qm_dev_ecc_mbit_handle(qm); - - /* OOO register set and check */ - writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, - qm->io_base + ACC_MASTER_GLOBAL_CTRL); - - /* If bus lock, reset chip */ - ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, - val, - (val == ACC_MASTER_TRANS_RETURN_RW), - POLL_PERIOD, POLL_TIMEOUT); - if (ret) { - pci_emerg(pdev, "Bus lock! Please reset system.\n"); + ret = qm_master_ooo_check(qm); + if (ret) return ret; - } if (qm->err_ini->close_sva_prefetch) qm->err_ini->close_sva_prefetch(qm); ret = qm_set_pf_mse(qm, false); - if (ret) { + if (ret) pci_err(pdev, "Fails to disable pf MSE bit.\n"); - return ret; - } + + return ret; +} + +static int qm_reset_device(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; /* The reset related sub-control registers are not in PCI BAR */ if (ACPI_HANDLE(&pdev->dev)) { @@ -4197,12 +4207,23 @@ static int qm_soft_reset(struct hisi_qm *qm) pci_err(pdev, "Reset step %llu failed!\n", value); return -EIO; } - } else { - pci_err(pdev, "No reset method!\n"); - return -EINVAL; + + return 0; } - return 0; + pci_err(pdev, "No reset method!\n"); + return -EINVAL; +} + +static int qm_soft_reset(struct hisi_qm *qm) +{ + int ret; + + ret = qm_soft_reset_prepare(qm); + if (ret) + return ret; + + return qm_reset_device(qm); } static int qm_vf_reset_done(struct hisi_qm *qm) @@ -5155,6 +5176,35 @@ err_request_mem_regions: return ret; } +static int qm_clear_device(struct hisi_qm *qm) +{ + acpi_handle 
handle = ACPI_HANDLE(&qm->pdev->dev); + int ret; + + if (qm->fun_type == QM_HW_VF) + return 0; + + /* Device does not support reset, return */ + if (!qm->err_ini->err_info_init) + return 0; + qm->err_ini->err_info_init(qm); + + if (!handle) + return 0; + + /* No reset method, return */ + if (!acpi_has_method(handle, qm->err_info.acpi_rst)) + return 0; + + ret = qm_master_ooo_check(qm); + if (ret) { + writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + return ret; + } + + return qm_reset_device(qm); +} + static int hisi_qm_pci_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5184,8 +5234,14 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) goto err_get_pci_res; } + ret = qm_clear_device(qm); + if (ret) + goto err_free_vectors; + return 0; +err_free_vectors: + pci_free_irq_vectors(pdev); err_get_pci_res: qm_put_pci_res(qm); err_disable_pcidev: @@ -5486,7 +5542,6 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; - u32 val; ret = qm->ops->set_msi(qm, false); if (ret) { @@ -5494,18 +5549,9 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) return ret; } - /* shutdown OOO register */ - writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, - qm->io_base + ACC_MASTER_GLOBAL_CTRL); - - ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, - val, - (val == ACC_MASTER_TRANS_RETURN_RW), - POLL_PERIOD, POLL_TIMEOUT); - if (ret) { - pci_emerg(pdev, "Bus lock! 
Please reset system.\n"); + ret = qm_master_ooo_check(qm); + if (ret) return ret; - } ret = qm_set_pf_mse(qm, false); if (ret) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 75aad04ffe5e..c35533d8930b 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1065,9 +1065,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) struct hisi_qm *qm = &sec->qm; int ret; - qm->err_ini = &sec_err_ini; - qm->err_ini->err_info_init(qm); - ret = sec_set_user_domain_and_cache(qm); if (ret) return ret; @@ -1122,6 +1119,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &sec_devices; + qm->err_ini = &sec_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { @@ -1186,6 +1184,12 @@ static int sec_probe_init(struct sec_dev *sec) static void sec_probe_uninit(struct hisi_qm *qm) { + if (qm->fun_type == QM_HW_VF) + return; + + sec_debug_regs_clear(qm); + sec_show_last_regs_uninit(qm); + sec_close_sva_prefetch(qm); hisi_qm_dev_err_uninit(qm); } @@ -1274,7 +1278,6 @@ err_qm_del_list: sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: - sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); err_qm_uninit: sec_qm_uninit(qm); @@ -1296,11 +1299,6 @@ static void sec_remove(struct pci_dev *pdev) sec_debugfs_exit(qm); (void)hisi_qm_stop(qm, QM_NORMAL); - - if (qm->fun_type == QM_HW_PF) - sec_debug_regs_clear(qm); - sec_show_last_regs_uninit(qm); - sec_probe_uninit(qm); sec_qm_uninit(qm); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 7c2d803886fd..d07e47b48be0 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -1141,8 +1141,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) hisi_zip->ctrl = 
ctrl; ctrl->hisi_zip = hisi_zip; - qm->err_ini = &hisi_zip_err_ini; - qm->err_ini->err_info_init(qm); ret = hisi_zip_set_user_domain_and_cache(qm); if (ret) @@ -1203,6 +1201,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &zip_devices; + qm->err_ini = &hisi_zip_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { @@ -1269,6 +1268,16 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) return 0; } +static void hisi_zip_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + hisi_zip_show_last_regs_uninit(qm); + hisi_zip_close_sva_prefetch(qm); + hisi_qm_dev_err_uninit(qm); +} + static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_zip *hisi_zip; @@ -1295,7 +1304,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = hisi_qm_start(qm); if (ret) - goto err_dev_err_uninit; + goto err_probe_uninit; ret = hisi_zip_debugfs_init(qm); if (ret) @@ -1334,9 +1343,8 @@ err_qm_del_list: hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); -err_dev_err_uninit: - hisi_zip_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); +err_probe_uninit: + hisi_zip_probe_uninit(qm); err_qm_uninit: hisi_zip_qm_uninit(qm); @@ -1358,8 +1366,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); - hisi_zip_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); + hisi_zip_probe_uninit(qm); hisi_zip_qm_uninit(qm); } From 145013f723947c83b1a5f76a0cf6e7237d59e973 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 31 Aug 2024 19:48:30 +0800 Subject: [PATCH 82/96] crypto: hisilicon/hpre - mask cluster timeout error The timeout threshold of the hpre cluster is 16ms. 
When the CPU and device share virtual address, page fault processing time may exceed the threshold. In the current test, there is a high probability that the cluster times out. However, the cluster is waiting for the completion of memory access, which is not an error, the device does not need to be reset. If an error occurs in the cluster, qm also reports the error. Therefore, the cluster timeout error of hpre can be masked. Fixes: d90fab0deb8e ("crypto: hisilicon/qm - get error type from hardware registers") Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 12d52713b95e..6b536ad2ada5 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -13,9 +13,7 @@ #include #include "hpre.h" -#define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) -#define HPRE_COMM_CNT_CLR_CE 0x0 #define HPRE_CTRL_CNT_CLR_CE 0x301000 #define HPRE_FSM_MAX_CNT 0x301008 #define HPRE_VFG_AXQOS 0x30100c @@ -42,7 +40,6 @@ #define HPRE_HAC_INT_SET 0x301500 #define HPRE_RNG_TIMEOUT_NUM 0x301A34 #define HPRE_CORE_INT_ENABLE 0 -#define HPRE_CORE_INT_DISABLE GENMASK(21, 0) #define HPRE_RDCHN_INI_ST 0x301a00 #define HPRE_CLSTR_BASE 0x302000 #define HPRE_CORE_EN_OFFSET 0x04 @@ -66,7 +63,6 @@ #define HPRE_CLSTR_ADDR_INTRVL 0x1000 #define HPRE_CLUSTER_INQURY 0x100 #define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 -#define HPRE_TIMEOUT_ABNML_BIT 6 #define HPRE_PASID_EN_BIT 9 #define HPRE_REG_RD_INTVRL_US 10 #define HPRE_REG_RD_TMOUT_US 1000 @@ -203,9 +199,9 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = { {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37}, {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37}, {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 
0x0, 0x8, 0x8}, - {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE}, - {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE}, - {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, + {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E}, + {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E}, + {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E}, {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2}, @@ -656,11 +652,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE); writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG); - /* HPRE need more time, we close this interrupt */ - val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK); - val |= BIT(HPRE_TIMEOUT_ABNML_BIT); - writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK); - if (qm->ver >= QM_HW_V3) writel(HPRE_RSA_ENB | HPRE_ECC_ENB, qm->io_base + HPRE_TYPES_ENB); @@ -669,9 +660,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE); writel(0x0, qm->io_base + HPRE_BD_ENDIAN); - writel(0x0, qm->io_base + HPRE_INT_MASK); writel(0x0, qm->io_base + HPRE_POISON_BYPASS); - writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE); writel(0x0, qm->io_base + HPRE_ECC_BYPASS); writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG); @@ -761,7 +750,7 @@ static void hpre_hw_error_disable(struct hisi_qm *qm) static void hpre_hw_error_enable(struct hisi_qm *qm) { - u32 ce, nfe; + u32 ce, nfe, err_en; ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); @@ -778,7 +767,8 @@ static void 
hpre_hw_error_enable(struct hisi_qm *qm) hpre_master_ooo_ctrl(qm, true); /* enable hpre hw error interrupts */ - writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); + err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE; + writel(~err_en, qm->io_base + HPRE_INT_MASK); } static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) From b04f06fc0243600665b3b50253869533b7938468 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 31 Aug 2024 19:48:31 +0800 Subject: [PATCH 83/96] crypto: hisilicon/qm - inject error before stopping queue The master ooo cannot be completely closed when the accelerator core reports memory error. Therefore, the driver needs to inject the qm error to close the master ooo. Currently, the qm error is injected after stopping queue, memory may be released immediately after stopping queue, causing the device to access the released memory. Therefore, error is injected to close master ooo before stopping queue to ensure that the device does not access the released memory. 
Fixes: 6c6dd5802c2d ("crypto: hisilicon/qm - add controller reset interface") Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 47 ++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index b2b5f15abdf7..07983af9e3e2 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -4015,6 +4015,28 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set) return -ETIMEDOUT; } +static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) +{ + u32 nfe_enb = 0; + + /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ + if (qm->ver >= QM_HW_V3) + return; + + if (!qm->err_status.is_dev_ecc_mbit && + qm->err_status.is_qm_ecc_mbit && + qm->err_ini->close_axi_master_ooo) { + qm->err_ini->close_axi_master_ooo(qm); + } else if (qm->err_status.is_dev_ecc_mbit && + !qm->err_status.is_qm_ecc_mbit && + !qm->err_ini->close_axi_master_ooo) { + nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); + writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, + qm->io_base + QM_RAS_NFE_ENABLE); + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); + } +} + static int qm_vf_reset_prepare(struct hisi_qm *qm, enum qm_stop_reason stop_reason) { @@ -4079,6 +4101,8 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) return ret; } + qm_dev_ecc_mbit_handle(qm); + /* PF obtains the information of VF by querying the register. 
*/ qm_cmd_uninit(qm); @@ -4125,28 +4149,6 @@ static int qm_master_ooo_check(struct hisi_qm *qm) return ret; } -static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) -{ - u32 nfe_enb = 0; - - /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ - if (qm->ver >= QM_HW_V3) - return; - - if (!qm->err_status.is_dev_ecc_mbit && - qm->err_status.is_qm_ecc_mbit && - qm->err_ini->close_axi_master_ooo) { - qm->err_ini->close_axi_master_ooo(qm); - } else if (qm->err_status.is_dev_ecc_mbit && - !qm->err_status.is_qm_ecc_mbit && - !qm->err_ini->close_axi_master_ooo) { - nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); - writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, - qm->io_base + QM_RAS_NFE_ENABLE); - writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); - } -} - static int qm_soft_reset_prepare(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -4171,7 +4173,6 @@ static int qm_soft_reset_prepare(struct hisi_qm *qm) return ret; } - qm_dev_ecc_mbit_handle(qm); ret = qm_master_ooo_check(qm); if (ret) return ret; From e7a4142b35ce489fc8908d75596c51549711ade0 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 1 Sep 2024 16:05:40 +0800 Subject: [PATCH 84/96] crypto: api - Fix generic algorithm self-test races On Fri, Aug 30, 2024 at 10:51:54AM -0700, Eric Biggers wrote: > > Given below in defconfig form, use 'make olddefconfig' to apply. 
The failures > are nondeterministic and sometimes there are different ones, for example: > > [ 0.358017] alg: skcipher: failed to allocate transform for cbc(twofish-generic): -2 > [ 0.358365] alg: self-tests for cbc(twofish) using cbc(twofish-generic) failed (rc=-2) > [ 0.358535] alg: skcipher: failed to allocate transform for cbc(camellia-generic): -2 > [ 0.358918] alg: self-tests for cbc(camellia) using cbc(camellia-generic) failed (rc=-2) > [ 0.371533] alg: skcipher: failed to allocate transform for xts(ecb(aes-generic)): -2 > [ 0.371922] alg: self-tests for xts(aes) using xts(ecb(aes-generic)) failed (rc=-2) > > Modules are not enabled, maybe that matters (I haven't checked yet). Yes I think that was the key. This triggers a massive self-test run which executes in parallel and reveals a few race conditions in the system. I think it boils down to the following scenario: Base algorithm X-generic, X-optimised Template Y Optimised algorithm Y-X-optimised Everything gets registered, and then the self-tests are started. When Y-X-optimised gets tested, it requests the creation of the generic Y(X-generic). Which then itself undergoes testing. The race is that after Y(X-generic) gets registered, but just before it gets tested, X-optimised finally finishes self-testing which then causes all spawns of X-generic to be destroyed. So by the time the self-test for Y(X-generic) comes along, it can no longer find the algorithm. This error then bubbles up all the way up to the self-test of Y-X-optimised which then fails. Note that there is some complexity that I've omitted here because when the generic self-test fails to find Y(X-generic) it actually triggers the construction of it again which then fails for various other reasons (these are not important because the construction should *not* be triggered at this point). So in a way the error is expected, and we should probably remove the pr_err for the case where ENOENT is returned for the algorithm that we're currently testing. 
The solution is two-fold. First when an algorithm undergoes self-testing it should not trigger its construction. Secondly if an instance larval fails to materialise due to it being destroyed by a more optimised algorithm coming along, it should obviously retry the construction. Remove the check in __crypto_alg_lookup that stops a larval from matching new requests based on differences in the mask. It is better to block new requests even if it is wrong and then simply retry the lookup. If this ends up being the wrong larval it will sort itself out during the retry. Reduce the CRYPTO_ALG_TYPE_MASK bits in type during larval creation as otherwise LSKCIPHER algorithms may not match SKCIPHER larvals. Also block the instance creation during self-testing in the function crypto_larval_lookup by checking for CRYPTO_ALG_TESTED in the mask field. Finally change the return value when crypto_alg_lookup fails in crypto_larval_wait to EAGAIN to redo the lookup. Fixes: 37da5d0ffa7b ("crypto: api - Do not wait for tests during registration") Reported-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/api.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/crypto/api.c b/crypto/api.c index bbe29d438815..bfd177a4313a 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -70,11 +70,6 @@ static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, if ((q->cra_flags ^ type) & mask) continue; - if (crypto_is_larval(q) && - !crypto_is_test_larval((struct crypto_larval *)q) && - ((struct crypto_larval *)q)->mask != mask) - continue; - exact = !strcmp(q->cra_driver_name, name); fuzzy = !strcmp(q->cra_name, name); if (!exact && !(fuzzy && q->cra_priority > best)) @@ -113,6 +108,8 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask) if (!larval) return ERR_PTR(-ENOMEM); + type &= ~CRYPTO_ALG_TYPE_MASK | (mask ?: CRYPTO_ALG_TYPE_MASK); + larval->mask = mask; larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; 
larval->alg.cra_priority = -1; @@ -229,7 +226,7 @@ again: type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); mask = larval->mask; alg = crypto_alg_lookup(alg->cra_name, type, mask) ?: - ERR_PTR(-ENOENT); + ERR_PTR(-EAGAIN); } else if (IS_ERR(alg)) ; else if (crypto_is_test_larval(larval) && @@ -308,8 +305,12 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg)) alg = crypto_larval_wait(alg); - else if (!alg) + else if (alg) + ; + else if (!(mask & CRYPTO_ALG_TESTED)) alg = crypto_larval_add(name, type, mask); + else + alg = ERR_PTR(-ENOENT); return alg; } From 795f85fca229a88543a0a706039f901106bf11c1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 1 Sep 2024 16:06:56 +0800 Subject: [PATCH 85/96] crypto: algboss - Pass instance creation error up Pass any errors we get during instance creation up through the larval. Signed-off-by: Herbert Xu --- crypto/algboss.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/algboss.c b/crypto/algboss.c index d05a5aad2176..a20926bfd34e 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -51,7 +51,7 @@ static int cryptomgr_probe(void *data) { struct cryptomgr_param *param = data; struct crypto_template *tmpl; - int err; + int err = -ENOENT; tmpl = crypto_lookup_template(param->template); if (!tmpl) @@ -64,6 +64,7 @@ static int cryptomgr_probe(void *data) crypto_tmpl_put(tmpl); out: + param->larval->adult = ERR_PTR(err); param->larval->alg.cra_flags |= CRYPTO_ALG_DEAD; complete_all(&param->larval->completion); crypto_alg_put(&param->larval->alg); From a0e435e808d60b163d7bf78ec1002f80a5d127d5 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sun, 1 Sep 2024 17:04:30 +0100 Subject: [PATCH 86/96] crypto: hisilicon/sec - Remove trailing space after \n newline There is an extraneous space after a newline in a dev_err message. Remove it. 
Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec/sec_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index afdddf87cc34..9bafcc5aa404 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -458,7 +458,7 @@ static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[]) static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask) { if (hash_mask & SEC_HASH_IPV4_MASK) { - dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n "); + dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n"); return -EINVAL; } From 2e691e1cd24d4f5cee4bc24d5adc6f1307b9f09f Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sun, 1 Sep 2024 17:07:17 +0100 Subject: [PATCH 87/96] crypto: qat - Remove trailing space after \n newline There is an extraneous space after a newline in a pr_err message. Remove it. Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index ad2c64af7427..7ea40b4f6e5b 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -58,7 +58,7 @@ static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) unsigned int i; if (!ae_data) { - pr_err("QAT: bad argument, ae_data is NULL\n "); + pr_err("QAT: bad argument, ae_data is NULL\n"); return -EINVAL; } From 4eded6d14f5b7bb857b68872970a40cf3105c015 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Sep 2024 07:33:40 +0800 Subject: [PATCH 88/96] crypto: testmgr - Hide ENOENT errors When a crypto algorithm with a higher priority is registered, it kills the spawns of all lower-priority algorithms. 
Thus it is to be expected for an algorithm to go away at any time, even during a self-test. This is now much more common with asynchronous testing. Remove the printk when an ENOENT is encountered during a self-test. This is not really an error since the algorithm being tested is no longer there (i.e., it didn't fail the test which is what we care about). Signed-off-by: Herbert Xu --- crypto/testmgr.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f02cb075bd68..ee8da628e9da 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1939,6 +1939,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs, atfm = crypto_alloc_ahash(driver, type, mask); if (IS_ERR(atfm)) { + if (PTR_ERR(atfm) == -ENOENT) + return -ENOENT; pr_err("alg: hash: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(atfm)); return PTR_ERR(atfm); @@ -2703,6 +2705,8 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, tfm = crypto_alloc_aead(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return -ENOENT; pr_err("alg: aead: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3280,6 +3284,8 @@ static int alg_test_skcipher(const struct alg_test_desc *desc, tfm = crypto_alloc_skcipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return -ENOENT; pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3693,6 +3699,8 @@ static int alg_test_cipher(const struct alg_test_desc *desc, tfm = crypto_alloc_cipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return -ENOENT; printk(KERN_ERR "alg: cipher: Failed to load transform for " "%s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3717,6 +3725,8 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, if (algo_type == 
CRYPTO_ALG_TYPE_ACOMPRESS) { acomp = crypto_alloc_acomp(driver, type, mask); if (IS_ERR(acomp)) { + if (PTR_ERR(acomp) == -ENOENT) + return -ENOENT; pr_err("alg: acomp: Failed to load transform for %s: %ld\n", driver, PTR_ERR(acomp)); return PTR_ERR(acomp); @@ -3729,6 +3739,8 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, } else { comp = crypto_alloc_comp(driver, type, mask); if (IS_ERR(comp)) { + if (PTR_ERR(comp) == -ENOENT) + return -ENOENT; pr_err("alg: comp: Failed to load transform for %s: %ld\n", driver, PTR_ERR(comp)); return PTR_ERR(comp); @@ -3805,6 +3817,8 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, rng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(rng)) { + if (PTR_ERR(rng) == -ENOENT) + return -ENOENT; printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(rng)); return PTR_ERR(rng); @@ -3832,10 +3846,13 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr, drng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(drng)) { + if (PTR_ERR(drng) == -ENOENT) + goto out_no_rng; printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " "%s\n", driver); +out_no_rng: kfree_sensitive(buf); - return -ENOMEM; + return PTR_ERR(drng); } test_data.testentropy = &testentropy; @@ -4077,6 +4094,8 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver, tfm = crypto_alloc_kpp(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return -ENOENT; pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -4305,6 +4324,8 @@ static int alg_test_akcipher(const struct alg_test_desc *desc, tfm = crypto_alloc_akcipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return -ENOENT; pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); From c398cb8eb0a263a1b7a18892d9f244751689675c Mon Sep 17 
00:00:00 2001 From: Herbert Xu Date: Thu, 5 Sep 2024 10:21:49 +0800 Subject: [PATCH 89/96] crypto: octeontx* - Select CRYPTO_AUTHENC Select CRYPTO_AUTHENC as the function crypto_authenec_extractkeys may not be available without it. Fixes: 311eea7e37c4 ("crypto: octeontx - Fix authenc setkey") Fixes: 7ccb750dcac8 ("crypto: octeontx2 - Fix authenc setkey") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202409042013.gT2ZI4wR-lkp@intel.com/ Signed-off-by: Herbert Xu --- drivers/crypto/marvell/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig index a48591af12d0..78217577aa54 100644 --- a/drivers/crypto/marvell/Kconfig +++ b/drivers/crypto/marvell/Kconfig @@ -28,6 +28,7 @@ config CRYPTO_DEV_OCTEONTX_CPT select CRYPTO_SKCIPHER select CRYPTO_HASH select CRYPTO_AEAD + select CRYPTO_AUTHENC select CRYPTO_DEV_MARVELL help This driver allows you to utilize the Marvell Cryptographic @@ -47,6 +48,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT select CRYPTO_SKCIPHER select CRYPTO_HASH select CRYPTO_AEAD + select CRYPTO_AUTHENC select NET_DEVLINK help This driver allows you to utilize the Marvell Cryptographic From 30fed346a3ffcf27da9e7a74466502116186146e Mon Sep 17 00:00:00 2001 From: Riyan Dhiman Date: Tue, 3 Sep 2024 18:25:39 +0530 Subject: [PATCH 90/96] crypto: aegis128 - Fix indentation issue in crypto_aegis128_process_crypt() The code in crypto_aegis128_process_crypt() had an indentation issue where spaces were used instead of tabs. This commit corrects the indentation to use tabs, adhering to the Linux kernel coding style guidelines. Issue reported by checkpatch: - ERROR: code indent should use tabs where possible No functional changes are intended. 
Signed-off-by: Riyan Dhiman Signed-off-by: Herbert Xu --- crypto/aegis128-core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index c4f1bfa1d04f..4fdb53435827 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -323,8 +323,9 @@ static __always_inline int crypto_aegis128_process_crypt(struct aegis_state *state, struct skcipher_walk *walk, void (*crypt)(struct aegis_state *state, - u8 *dst, const u8 *src, - unsigned int size)) + u8 *dst, + const u8 *src, + unsigned int size)) { int err = 0; From 48b8843a0b74b0c2ff6aa44b31b27158f7d26306 Mon Sep 17 00:00:00 2001 From: Nikunj Kela Date: Thu, 5 Sep 2024 12:06:05 -0700 Subject: [PATCH 91/96] dt-bindings: crypto: qcom,prng: document support for SA8255p Document SA8255p compatible for the True Random Number Generator. Reviewed-by: Krzysztof Kozlowski Signed-off-by: Nikunj Kela Signed-off-by: Herbert Xu --- Documentation/devicetree/bindings/crypto/qcom,prng.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml index 89c88004b41b..048b769a73c0 100644 --- a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml @@ -17,6 +17,7 @@ properties: - qcom,prng-ee # 8996 and later using EE - items: - enum: + - qcom,sa8255p-trng - qcom,sa8775p-trng - qcom,sc7280-trng - qcom,sm8450-trng From 3e87031a6ce68f13722155497cd511a00b56a2ae Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Thu, 5 Sep 2024 20:25:20 -0400 Subject: [PATCH 92/96] crypto: qcom-rng - fix support for ACPI-based systems MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The qcom-rng driver supports both ACPI and device tree-based systems. ACPI support was broken when the hw_random interface support was added. 
Let's go ahead and fix this by adding the appropriate driver data to the ACPI match table, and change the of_device_get_match_data() call to device_get_match_data() so that it will also work on ACPI-based systems. This fix was boot tested on a Qualcomm Amberwing server (ACPI based) and on a Qualcomm SA8775p Automotive Development Board (DT based). I also verified that qcom-rng shows up in /proc/crypto on both systems. Fixes: f29cd5bb64c2 ("crypto: qcom-rng - Add hw_random interface support") Reported-by: Ernesto A. Fernández Closes: https://lore.kernel.org/linux-arm-msm/20240828184019.GA21181@eaf/ Cc: stable@vger.kernel.org Signed-off-by: Brian Masney Reviewed-by: Dmitry Baryshkov Signed-off-by: Herbert Xu --- drivers/crypto/qcom-rng.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index c670d7d0c11e..6496b075a48d 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -196,7 +196,7 @@ static int qcom_rng_probe(struct platform_device *pdev) if (IS_ERR(rng->clk)) return PTR_ERR(rng->clk); - rng->of_data = (struct qcom_rng_of_data *)of_device_get_match_data(&pdev->dev); + rng->of_data = (struct qcom_rng_of_data *)device_get_match_data(&pdev->dev); qcom_rng_dev = rng; ret = crypto_register_rng(&qcom_rng_alg); @@ -247,7 +247,7 @@ static struct qcom_rng_of_data qcom_trng_of_data = { }; static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = { - { .id = "QCOM8160", .driver_data = 1 }, + { .id = "QCOM8160", .driver_data = (kernel_ulong_t)&qcom_prng_ee_of_data }, {} }; MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match); From f29ca8f762d19f7e26913ee49325806cb55f2d8f Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Thu, 5 Sep 2024 20:25:21 -0400 Subject: [PATCH 93/96] crypto: qcom-rng - rename *_of_data to *_match_data The qcom-rng driver supports both ACPI and device tree based systems. 
Let's rename all instances of *of_data to *match_data so that it's not implied that this driver only supports device tree-based systems. Signed-off-by: Brian Masney Reviewed-by: Dmitry Baryshkov Signed-off-by: Herbert Xu --- drivers/crypto/qcom-rng.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index 6496b075a48d..09419e79e34c 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -36,14 +36,14 @@ struct qcom_rng { void __iomem *base; struct clk *clk; struct hwrng hwrng; - struct qcom_rng_of_data *of_data; + struct qcom_rng_match_data *match_data; }; struct qcom_rng_ctx { struct qcom_rng *rng; }; -struct qcom_rng_of_data { +struct qcom_rng_match_data { bool skip_init; bool hwrng_support; }; @@ -155,7 +155,7 @@ static int qcom_rng_init(struct crypto_tfm *tfm) ctx->rng = qcom_rng_dev; - if (!ctx->rng->of_data->skip_init) + if (!ctx->rng->match_data->skip_init) return qcom_rng_enable(ctx->rng); return 0; @@ -196,7 +196,7 @@ static int qcom_rng_probe(struct platform_device *pdev) if (IS_ERR(rng->clk)) return PTR_ERR(rng->clk); - rng->of_data = (struct qcom_rng_of_data *)device_get_match_data(&pdev->dev); + rng->match_data = (struct qcom_rng_match_data *)device_get_match_data(&pdev->dev); qcom_rng_dev = rng; ret = crypto_register_rng(&qcom_rng_alg); @@ -206,7 +206,7 @@ static int qcom_rng_probe(struct platform_device *pdev) return ret; } - if (rng->of_data->hwrng_support) { + if (rng->match_data->hwrng_support) { rng->hwrng.name = "qcom_hwrng"; rng->hwrng.read = qcom_hwrng_read; rng->hwrng.quality = QCOM_TRNG_QUALITY; @@ -231,31 +231,31 @@ static void qcom_rng_remove(struct platform_device *pdev) qcom_rng_dev = NULL; } -static struct qcom_rng_of_data qcom_prng_of_data = { +static struct qcom_rng_match_data qcom_prng_match_data = { .skip_init = false, .hwrng_support = false, }; -static struct qcom_rng_of_data qcom_prng_ee_of_data = { +static struct 
qcom_rng_match_data qcom_prng_ee_match_data = { .skip_init = true, .hwrng_support = false, }; -static struct qcom_rng_of_data qcom_trng_of_data = { +static struct qcom_rng_match_data qcom_trng_match_data = { .skip_init = true, .hwrng_support = true, }; static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = { - { .id = "QCOM8160", .driver_data = (kernel_ulong_t)&qcom_prng_ee_of_data }, + { .id = "QCOM8160", .driver_data = (kernel_ulong_t)&qcom_prng_ee_match_data }, {} }; MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match); static const struct of_device_id __maybe_unused qcom_rng_of_match[] = { - { .compatible = "qcom,prng", .data = &qcom_prng_of_data }, - { .compatible = "qcom,prng-ee", .data = &qcom_prng_ee_of_data }, - { .compatible = "qcom,trng", .data = &qcom_trng_of_data }, + { .compatible = "qcom,prng", .data = &qcom_prng_match_data }, + { .compatible = "qcom,prng-ee", .data = &qcom_prng_ee_match_data }, + { .compatible = "qcom,trng", .data = &qcom_trng_match_data }, {} }; MODULE_DEVICE_TABLE(of, qcom_rng_of_match); From ca459e5f826f262f044bda85ede8460af7f4bec9 Mon Sep 17 00:00:00 2001 From: WangYuli Date: Fri, 6 Sep 2024 14:40:02 +0800 Subject: [PATCH 94/96] crypto: mips/crc32 - Clean up useless assignment operations When entering the "len & sizeof(u32)" branch, len must be less than 8. So after one operation, len must be less than 4. At this time, "len -= sizeof(u32)" is not necessary for 64-bit CPUs. After that, replace `while' loops with equivalent `for' to make the code structure a little bit better by the way. Suggested-by: Maciej W. 
Rozycki Link: https://lore.kernel.org/all/alpine.DEB.2.21.2406281713040.43454@angie.orcam.me.uk/ Suggested-by: Herbert Xu Link: https://lore.kernel.org/all/ZtqZpzMH_qMQqzyc@gondor.apana.org.au/ Signed-off-by: Guan Wentao Signed-off-by: WangYuli Signed-off-by: Herbert Xu --- arch/mips/crypto/crc32-mips.c | 64 +++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c index ec6d58008f8e..2a59b85f88aa 100644 --- a/arch/mips/crypto/crc32-mips.c +++ b/arch/mips/crypto/crc32-mips.c @@ -77,24 +77,26 @@ static u32 crc32_mips_le_hw(u32 crc_, const u8 *p, unsigned int len) { u32 crc = crc_; -#ifdef CONFIG_64BIT - while (len >= sizeof(u64)) { - u64 value = get_unaligned_le64(p); + if (IS_ENABLED(CONFIG_64BIT)) { + for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) { + u64 value = get_unaligned_le64(p); - CRC32(crc, value, d); - p += sizeof(u64); - len -= sizeof(u64); - } + CRC32(crc, value, d); + } - if (len & sizeof(u32)) { -#else /* !CONFIG_64BIT */ - while (len >= sizeof(u32)) { -#endif - u32 value = get_unaligned_le32(p); + if (len & sizeof(u32)) { + u32 value = get_unaligned_le32(p); - CRC32(crc, value, w); - p += sizeof(u32); - len -= sizeof(u32); + CRC32(crc, value, w); + p += sizeof(u32); + } + } else { + for (; len >= sizeof(u32); len -= sizeof(u32)) { + u32 value = get_unaligned_le32(p); + + CRC32(crc, value, w); + p += sizeof(u32); + } } if (len & sizeof(u16)) { @@ -117,24 +119,26 @@ static u32 crc32c_mips_le_hw(u32 crc_, const u8 *p, unsigned int len) { u32 crc = crc_; -#ifdef CONFIG_64BIT - while (len >= sizeof(u64)) { - u64 value = get_unaligned_le64(p); + if (IS_ENABLED(CONFIG_64BIT)) { + for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) { + u64 value = get_unaligned_le64(p); - CRC32C(crc, value, d); - p += sizeof(u64); - len -= sizeof(u64); - } + CRC32(crc, value, d); + } - if (len & sizeof(u32)) { -#else /* !CONFIG_64BIT */ - 
while (len >= sizeof(u32)) { -#endif - u32 value = get_unaligned_le32(p); + if (len & sizeof(u32)) { + u32 value = get_unaligned_le32(p); - CRC32C(crc, value, w); - p += sizeof(u32); - len -= sizeof(u32); + CRC32(crc, value, w); + p += sizeof(u32); + } + } else { + for (; len >= sizeof(u32); len -= sizeof(u32)) { + u32 value = get_unaligned_le32(p); + + CRC32(crc, value, w); + p += sizeof(u32); + } } if (len & sizeof(u16)) { From e2b19a4840650ba1d679562d4a8959f3f6070064 Mon Sep 17 00:00:00 2001 From: Chen Yufan Date: Fri, 6 Sep 2024 18:43:24 +0800 Subject: [PATCH 95/96] crypto: camm/qi - Use ERR_CAST() to return error-valued pointer Instead of directly casting and returning (void *) pointer, use ERR_CAST to explicitly return an error-valued pointer. This makes the error handling more explicit and improves code clarity. Signed-off-by: Chen Yufan Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 743ce50c14f2..13347dfecf7a 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -961,7 +961,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); if (IS_ERR(drv_ctx)) - return (struct aead_edesc *)drv_ctx; + return ERR_CAST(drv_ctx); /* allocate space for base edesc and hw desc commands, link tables */ edesc = qi_cache_alloc(flags); @@ -1271,7 +1271,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, drv_ctx = get_drv_ctx(ctx, encrypt ? 
ENCRYPT : DECRYPT); if (IS_ERR(drv_ctx)) - return (struct skcipher_edesc *)drv_ctx; + return ERR_CAST(drv_ctx); src_nents = sg_nents_for_len(req->src, req->cryptlen); if (unlikely(src_nents < 0)) { From ce212d2afca47acd366a2e74c76fe82c31f785ab Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 10 Sep 2024 17:30:24 +0800 Subject: [PATCH 96/96] crypto: n2 - Set err to EINVAL if snprintf fails for hmac Return EINVAL if the snprintf check fails when constructing the algorithm names. Fixes: 8c20982caca4 ("crypto: n2 - Silence gcc format-truncation false positive warnings") Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202409090726.TP0WfY7p-lkp@intel.com/ Signed-off-by: Herbert Xu --- drivers/crypto/n2_core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 251e088a53df..b11545cc5cb7 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1353,6 +1353,7 @@ static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) ahash->setkey = n2_hmac_async_setkey; base = &ahash->halg.base; + err = -EINVAL; if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg) >= CRYPTO_MAX_ALG_NAME) goto out_free_p;