From 1532e31f5098a220c920303213017b11df777c0b Mon Sep 17 00:00:00 2001
From: Giovanni Cabiddu
Date: Wed, 27 May 2020 16:21:28 +0100
Subject: [PATCH 001/157] crypto: qat - convert to SPDX License Identifiers

Replace License Headers with SPDX License Identifiers.

Signed-off-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
---
 .../crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c  | 48 +-----------------
 .../crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h  | 48 +-----------------
 drivers/crypto/qat/qat_c3xxx/adf_drv.c        | 48 +-----------------
 .../qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c     | 48 +-----------------
 .../qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h     | 48 +-----------------
 drivers/crypto/qat/qat_c3xxxvf/adf_drv.c      | 48 +-----------------
 .../crypto/qat/qat_c62x/adf_c62x_hw_data.c    | 48 +-----------------
 .../crypto/qat/qat_c62x/adf_c62x_hw_data.h    | 48 +-----------------
 drivers/crypto/qat/qat_c62x/adf_drv.c         | 48 +-----------------
 .../qat/qat_c62xvf/adf_c62xvf_hw_data.c       | 48 +-----------------
 .../qat/qat_c62xvf/adf_c62xvf_hw_data.h       | 48 +-----------------
 drivers/crypto/qat/qat_c62xvf/adf_drv.c       | 48 +-----------------
 .../crypto/qat/qat_common/adf_accel_devices.h | 48 +-----------------
 .../crypto/qat/qat_common/adf_accel_engine.c  | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_admin.c     | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_aer.c       | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_cfg.c       | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_cfg.h       | 48 +-----------------
 .../crypto/qat/qat_common/adf_cfg_common.h    | 48 +-----------------
 .../crypto/qat/qat_common/adf_cfg_strings.h   | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_cfg_user.h  | 48 +-----------------
 .../crypto/qat/qat_common/adf_common_drv.h    | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_ctl_drv.c   | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_dev_mgr.c   | 48 +-----------------
 .../crypto/qat/qat_common/adf_hw_arbiter.c    | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_init.c      | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_isr.c       | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 49 +------------------
 drivers/crypto/qat/qat_common/adf_pf2vf_msg.h | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_sriov.c     | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_transport.c | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_transport.h | 48 +-----------------
 .../qat_common/adf_transport_access_macros.h  | 48 +-----------------
 .../qat/qat_common/adf_transport_debug.c      | 48 +-----------------
 .../qat/qat_common/adf_transport_internal.h   | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_vf2pf_msg.c | 48 +-----------------
 drivers/crypto/qat/qat_common/adf_vf_isr.c    | 48 +-----------------
 drivers/crypto/qat/qat_common/icp_qat_fw.h    | 48 +-----------------
 .../qat/qat_common/icp_qat_fw_init_admin.h    | 48 +-----------------
 drivers/crypto/qat/qat_common/icp_qat_fw_la.h | 48 +-----------------
 .../qat/qat_common/icp_qat_fw_loader_handle.h | 48 +-----------------
 .../crypto/qat/qat_common/icp_qat_fw_pke.h    | 48 +-----------------
 drivers/crypto/qat/qat_common/icp_qat_hal.h   | 48 +-----------------
 drivers/crypto/qat/qat_common/icp_qat_hw.h    | 48 +-----------------
 drivers/crypto/qat/qat_common/icp_qat_uclo.h  | 48 +-----------------
 drivers/crypto/qat/qat_common/qat_algs.c      | 48 +-----------------
 drivers/crypto/qat/qat_common/qat_asym_algs.c | 49 +------------------
 drivers/crypto/qat/qat_common/qat_crypto.c    | 48 +-----------------
 drivers/crypto/qat/qat_common/qat_crypto.h    | 48 +-----------------
 drivers/crypto/qat/qat_common/qat_hal.c       | 48 +-----------------
 drivers/crypto/qat/qat_common/qat_uclo.c      | 48 +-----------------
 .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c   | 48 +-----------------
 .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.h   | 48 +-----------------
 drivers/crypto/qat/qat_dh895xcc/adf_drv.c     | 48 +-----------------
 .../qat_dh895xccvf/adf_dh895xccvf_hw_data.c   | 48 +-----------------
 .../qat_dh895xccvf/adf_dh895xccvf_hw_data.h   | 48 +-----------------
 drivers/crypto/qat/qat_dh895xccvf/adf_drv.c   | 48 +-----------------
 57 files changed, 114 insertions(+), 2624 deletions(-)

diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 6bc68bc00d76..aee494d3da52 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index afc9a0a86747..8b5dd2c94ebf 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #ifndef ADF_C3XXX_HW_DATA_H_
 #define ADF_C3XXX_HW_DATA_H_
 
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index d937cc7248a5..020d099409e5 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index d2d0ae445fd8..d2fedbd7113c 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2015 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2015 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
index 934f216acf39..7945a9cd1c60 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2015 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2015 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
 #ifndef ADF_C3XXXVF_HW_DATA_H_
 #define ADF_C3XXXVF_HW_DATA_H_
 
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1dc5ac859f7b..11039fe55f61 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index 618cec360b39..844ad5ed33fc 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
index 17a8a32d5c63..88504d2bf30d 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #ifndef ADF_C62X_HW_DATA_H_
 #define ADF_C62X_HW_DATA_H_
 
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 2bc06c89d2fe..4ba9c14383af 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 38e4bc04f407..29fd3f1091ab 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2015 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2015 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
index a28d83e77422..a6c04cf7a43c 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2015 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2015 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
 #ifndef ADF_C62XVF_HW_DATA_H_
 #define ADF_C62XVF_HW_DATA_H_
 
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index a68358b31292..b8b021d54bb5 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 33f0a6251e38..b3500bc4a558 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #ifndef ADF_ACCEL_DEVICES_H_
 #define ADF_ACCEL_DEVICES_H_
 #include
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
index a42fc42704be..00d4d13e33e8 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include "adf_cfg.h"
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index d28cba34773e..e363d13dcd78 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index f5e960d23a7a..35cdc0fbbe40 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index 5c7fdb0fc53d..ac462796cefc 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h
index 6a9c6f6b5ec9..376cde61a60e 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg.h
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #ifndef ADF_CFG_H_
 #define ADF_CFG_H_
 
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
index 1211261de7c2..e818033c1012 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #ifndef ADF_CFG_COMMON_H_
 #define ADF_CFG_COMMON_H_
 
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index 7632ed0f25c5..314790f5b0af 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #ifndef ADF_CFG_STRINGS_H_
 #define ADF_CFG_STRINGS_H_
 
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
index b5484bfa6996..86b4a3e3f355 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_user.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #ifndef ADF_CFG_USER_H_
 #define ADF_CFG_USER_H_
 
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index d78f8d5c89c3..ae91203bb617 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #ifndef ADF_DRV_H
 #define ADF_DRV_H
 
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index ef0e482ee04f..4d4589d4b64c 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
 #include
 #include
 #include
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 2d06409bd3c4..e663054a11d5 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -1,49 +1,5 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include "adf_cfg.h" diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c index d7dd18d9bef8..d4162783f970 100644 --- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_transport_internal.h" diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 26556c713049..42029153408e 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index cd1cdf5305bc..36136f7db509 100644 --- a/drivers/crypto/qat/qat_common/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index b3875fdf6cd7..519fd5acf713 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -1,50 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include #include "adf_accel_devices.h" #include "adf_common_drv.h" diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h index 5acd531a11ff..0690c031bfce 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2015 - 2020 Intel Corporation */ #ifndef ADF_PF2VF_MSG_H #define ADF_PF2VF_MSG_H diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c index b36d8653b1ba..8827aa139f96 100644 --- a/drivers/crypto/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/qat/qat_common/adf_sriov.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 2136cbe4bf6c..4625881a8832 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include "adf_accel_devices.h" #include "adf_transport_internal.h" diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h index 386485bd9c95..0faea1032b93 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.h +++ b/drivers/crypto/qat/qat_common/adf_transport.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_TRANSPORT_H #define ADF_TRANSPORT_H diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h index 80e02a2a0a09..1a943f948100 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_TRANSPORT_ACCESS_MACROS_H #define ADF_TRANSPORT_ACCESS_MACROS_H diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c index e794e9d97b2c..2a2eccbf56ec 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_debug.c +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index bb883368ac01..fc610ea31869 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_TRANSPORT_INTRN_H #define ADF_TRANSPORT_INTRN_H diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c index cd5f37dffe8a..2c98fb63f7b7 100644 --- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_pf2vf_msg.h" diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index 4a73fc70f7a9..c4a44dc6af3e 100644 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h index 46747f01b1d1..1b9a11f574ee 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_H_ #define _ICP_QAT_FW_H_ #include diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h index 72a59faa9005..2024babd7fc7 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_INIT_ADMIN_H_ #define _ICP_QAT_FW_INIT_ADMIN_H_ diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h index c8d26697e8ea..ee0a0c944b31 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_LA_H_ #define _ICP_QAT_FW_LA_H_ #include "icp_qat_fw.h" diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h index 2ffef3e4fd68..3e8e291cd122 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef __ICP_QAT_FW_LOADER_HANDLE_H__ #define __ICP_QAT_FW_LOADER_HANDLE_H__ #include "icp_qat_uclo.h" diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h index 0d7a9b51ce9f..4354a68de327 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. 
- This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_PKE_ #define _ICP_QAT_FW_PKE_ diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h index 7187917533d0..c0e9fc0c93dd 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hal.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef __ICP_QAT_HAL_H #define __ICP_QAT_HAL_H #include "icp_qat_fw_loader_handle.h" diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h index 121d5e6e46ca..1f1e7465f3c1 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_HW_H_ #define _ICP_QAT_HW_H_ diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index 5d1ee7e53492..dbb58eacc395 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef __ICP_QAT_UCLO_H__ #define __ICP_QAT_UCLO_H__ diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index e14d3dd291f0..9df4aebf37ca 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 692a7aaee749..6cd67b06979b 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -1,50 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index fb504cee0305..ab621b7dbd20 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include "adf_accel_devices.h" diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index 300bb919a33a..12682d1e9f5f 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _QAT_CRYPTO_INSTANCE_H_ #define _QAT_CRYPTO_INSTANCE_H_ diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index ff149e176f64..9a80da0b3b28 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 6bd8f6a2a24f..3f079e336f64 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 1dfcab317bed..0461040303a2 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index 092f7353ed23..082a04466dca 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_DH895x_HW_DATA_H_ #define ADF_DH895x_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index b11bf8c0e683..4e877b75822b 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index a3b4dd8099a7..5246f0524ca3 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include #include #include diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h index 6ddc19bd4410..2bfcc67f8f39 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2015 - 2020 Intel Corporation */ #ifndef ADF_DH895XVF_HW_DATA_H_ #define ADF_DH895XVF_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 1b762eefc6c1..7d6e1db272c2 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include #include #include From f3c802a1f30013f8f723b62d7fa49eb9e991da23 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 30 May 2020 00:23:49 +1000 Subject: [PATCH 002/157] crypto: algif_aead - Only wake up when ctx->more is zero AEAD does not support partial requests so we must not wake up while ctx->more is set. In order to distinguish between the case of no data sent yet and a zero-length request, a new init flag has been added to ctx. SKCIPHER has also been modified to ensure that at least a block of data is available if there is more data to come. Fixes: 2d97591ef43d ("crypto: af_alg - consolidation of...") Signed-off-by: Herbert Xu --- crypto/af_alg.c | 11 ++++++++--- crypto/algif_aead.c | 4 ++-- crypto/algif_skcipher.c | 4 ++-- include/crypto/if_alg.h | 4 +++- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 28fc323e3fe3..9fcb91ea10c4 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, if (!ctx->used) ctx->merge = 0; + ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); @@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); * * @sk socket of connection to user space * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @min Set to minimum request size if partial requests are allowed. 
* @return 0 when writable memory is available, < 0 upon error */ -int af_alg_wait_for_data(struct sock *sk, unsigned flags) +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); @@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags) if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + if (sk_wait_event(sk, &timeout, + ctx->init && (!ctx->more || + (min && ctx->used >= min)), &wait)) { err = 0; break; @@ -843,7 +847,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (!ctx->more && ctx->used) { + if (ctx->init && (init || !ctx->more)) { err = -EINVAL; goto unlock; } @@ -854,6 +858,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, memcpy(ctx->iv, con.iv->iv, ivsize); ctx->aead_assoclen = con.aead_assoclen; + ctx->init = true; } while (size) { diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0ae000a61c7f..d48d2156e621 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || ctx->more) { + err = af_alg_wait_for_data(sk, flags, 0); if (err) return err; } diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index ec5567c87a6d..a51ba22fef58 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || (ctx->more && ctx->used < bs)) { + err = af_alg_wait_for_data(sk, flags, bs); if (err) return err; } diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 088c1ded2714..ee6412314f8f 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -135,6 +135,7 @@ struct af_alg_async_req { * SG? * @enc: Cryptographic operation to be performed when * recvmsg is invoked. + * @init: True if metadata has been sent. * @len: Length of memory allocated for this data structure. */ struct af_alg_ctx { @@ -151,6 +152,7 @@ struct af_alg_ctx { bool more; bool merge; bool enc; + bool init; unsigned int len; }; @@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, size_t dst_offset); void af_alg_wmem_wakeup(struct sock *sk); -int af_alg_wait_for_data(struct sock *sk, unsigned flags); +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize); ssize_t af_alg_sendpage(struct socket *sock, struct page *page, From 50f362b2d9c4c5cd49feffe49fd833a629f59c38 Mon Sep 17 00:00:00 2001 From: Olivier Sobrie Date: Mon, 1 Jun 2020 16:27:39 +0200 Subject: [PATCH 003/157] dt-bindings: rng: document Silex Insight BA431 hwrng This patch documents the device tree bindings of the BA431 hardware random number generator. This IP is for instance present in the Viper OEM boards sold by Silex Insight. 
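For reference, a platform driver consumes this binding through an OF match table keyed on the compatible string above. The sketch below is only an illustration of that pattern (the name example_dt_ids is made up for this note; the actual driver added in the following patch carries its own table):

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Illustrative OF match table for the binding documented above. */
static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "silex-insight,ba431-rng" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_dt_ids);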
Signed-off-by: Olivier Sobrie Reviewed-by: Rob Herring Signed-off-by: Herbert Xu --- .../bindings/rng/silex-insight,ba431-rng.yaml | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml diff --git a/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml b/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml new file mode 100644 index 000000000000..48ab82abf50e --- /dev/null +++ b/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rng/silex-insight,ba431-rng.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Silex Insight BA431 RNG bindings + +description: | + The BA431 hardware random number generator is an IP that is FIPS-140-2/3 + certified. + +maintainers: + - Olivier Sobrie + +properties: + compatible: + const: silex-insight,ba431-rng + + reg: + maxItems: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + rng@42800000 { + compatible = "silex-insight,ba431-rng"; + reg = <0x42800000 0x1000>; + }; + +... From 0289e9be5dc26d84dda6fc5492f08ca1ff599744 Mon Sep 17 00:00:00 2001 From: Olivier Sobrie Date: Mon, 1 Jun 2020 16:27:40 +0200 Subject: [PATCH 004/157] hwrng: ba431 - add support for BA431 hwrng The Silex Insight BA431 is a random number generator IP that can be integrated in various FPGAs. This driver adds support for it through the hwrng interface. This driver is used in Silex Insight Viper OEM boards. Signed-off-by: Olivier Sobrie Signed-off-by: Waleed Ziad Acked-by: Arnd Bergmann Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 12 ++ drivers/char/hw_random/Makefile | 1 + drivers/char/hw_random/ba431-rng.c | 234 +++++++++++++++++++++++++++++ 3 files changed, 247 insertions(+) create mode 100644 drivers/char/hw_random/ba431-rng.c diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 0ad17efc96df..7b876dfdbaaf 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -74,6 +74,18 @@ config HW_RANDOM_ATMEL If unsure, say Y. +config HW_RANDOM_BA431 + tristate "Silex Insight BA431 Random Number Generator support" + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number + Generator hardware based on Silex Insight BA431 IP. + + To compile this driver as a module, choose M here: the + module will be called ba431-rng. + + If unsure, say Y.
+ config HW_RANDOM_BCM2835 tristate "Broadcom BCM2835/BCM63xx Random Number Generator support" depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \ diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 2c6724735345..4ed9feb094a1 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o +obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o n2-rng-y := n2-drv.o n2-asm.o diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c new file mode 100644 index 000000000000..a39e3abf50b9 --- /dev/null +++ b/drivers/char/hw_random/ba431-rng.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Silex Insight + +#include +#include +#include +#include +#include +#include +#include +#include + +#define BA431_RESET_DELAY 1 /* usec */ +#define BA431_RESET_READ_STATUS_TIMEOUT 1000 /* usec */ +#define BA431_RESET_READ_STATUS_INTERVAL 10 /* usec */ +#define BA431_READ_RETRY_INTERVAL 1 /* usec */ + +#define BA431_REG_CTRL 0x00 +#define BA431_REG_FIFO_LEVEL 0x04 +#define BA431_REG_STATUS 0x30 +#define BA431_REG_FIFODATA 0x80 + +#define BA431_CTRL_ENABLE BIT(0) +#define BA431_CTRL_SOFTRESET BIT(8) + +#define BA431_STATUS_STATE_MASK (BIT(1) | BIT(2) | BIT(3)) +#define BA431_STATUS_STATE_OFFSET 1 + +enum ba431_state { + BA431_STATE_RESET, + BA431_STATE_STARTUP, + BA431_STATE_FIFOFULLON, + BA431_STATE_FIFOFULLOFF, + BA431_STATE_RUNNING, + BA431_STATE_ERROR +}; + +struct ba431_trng { + struct device *dev; + void __iomem *base; + struct hwrng rng; + atomic_t reset_pending; + struct work_struct reset_work; +}; + +static inline u32 ba431_trng_read_reg(struct ba431_trng *ba431, u32 reg) +{ + return ioread32(ba431->base + reg); +} + +static inline void ba431_trng_write_reg(struct ba431_trng *ba431, u32 reg, + u32 val) +{ + iowrite32(val, ba431->base + reg); +} + +static inline enum ba431_state ba431_trng_get_state(struct ba431_trng *ba431) +{ + u32 status = ba431_trng_read_reg(ba431, BA431_REG_STATUS); + + return (status & BA431_STATUS_STATE_MASK) >> BA431_STATUS_STATE_OFFSET; +} + +static int ba431_trng_is_in_error(struct ba431_trng *ba431) +{ + enum ba431_state state = ba431_trng_get_state(ba431); + + if ((state < BA431_STATE_STARTUP) || + (state >= BA431_STATE_ERROR)) + return 1; + + return 0; +} + +static int ba431_trng_reset(struct ba431_trng *ba431) +{ + int ret; + + /* Disable interrupts, random generation and enable the softreset */ + ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_SOFTRESET); + udelay(BA431_RESET_DELAY); + ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_ENABLE); + + /* Wait until the state changed */ + if (readx_poll_timeout(ba431_trng_is_in_error, ba431, ret, !ret, + BA431_RESET_READ_STATUS_INTERVAL, + BA431_RESET_READ_STATUS_TIMEOUT)) { + dev_err(ba431->dev, "reset failed (state: %d)\n", + ba431_trng_get_state(ba431)); + return -ETIMEDOUT; + } + + dev_info(ba431->dev, "reset done\n"); + + return 0; +} + +static void ba431_trng_reset_work(struct work_struct *work) +{ + struct ba431_trng *ba431 = container_of(work, struct ba431_trng, + reset_work); + ba431_trng_reset(ba431); + atomic_set(&ba431->reset_pending, 0); +} + +static void ba431_trng_schedule_reset(struct ba431_trng *ba431) +{ + 
if (atomic_cmpxchg(&ba431->reset_pending, 0, 1)) + return; + + schedule_work(&ba431->reset_work); +} + +static int ba431_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng); + u32 *data = buf; + unsigned int level, i; + int n = 0; + + while (max > 0) { + level = ba431_trng_read_reg(ba431, BA431_REG_FIFO_LEVEL); + if (!level) { + if (ba431_trng_is_in_error(ba431)) { + ba431_trng_schedule_reset(ba431); + break; + } + + if (!wait) + break; + + udelay(BA431_READ_RETRY_INTERVAL); + continue; + } + + i = level; + do { + data[n++] = ba431_trng_read_reg(ba431, + BA431_REG_FIFODATA); + max -= sizeof(*data); + } while (--i && (max > 0)); + + if (ba431_trng_is_in_error(ba431)) { + n -= (level - i); + ba431_trng_schedule_reset(ba431); + break; + } + } + + n *= sizeof(data); + return (n || !wait) ? n : -EIO; +} + +static void ba431_trng_cleanup(struct hwrng *rng) +{ + struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng); + + ba431_trng_write_reg(ba431, BA431_REG_CTRL, 0); + cancel_work_sync(&ba431->reset_work); +} + +static int ba431_trng_init(struct hwrng *rng) +{ + struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng); + + return ba431_trng_reset(ba431); +} + +static int ba431_trng_probe(struct platform_device *pdev) +{ + struct ba431_trng *ba431; + struct resource *res; + int ret; + + ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL); + if (!ba431) + return -ENOMEM; + + ba431->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ba431->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ba431->base)) + return PTR_ERR(ba431->base); + + atomic_set(&ba431->reset_pending, 0); + INIT_WORK(&ba431->reset_work, ba431_trng_reset_work); + ba431->rng.name = pdev->name; + ba431->rng.init = ba431_trng_init; + ba431->rng.cleanup = ba431_trng_cleanup; + ba431->rng.read = ba431_trng_read; + + platform_set_drvdata(pdev, ba431); + + ret = hwrng_register(&ba431->rng); + if (ret) { + dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret); + return ret; + } + + dev_info(&pdev->dev, "BA431 TRNG registered\n"); + + return 0; +} + +static int ba431_trng_remove(struct platform_device *pdev) +{ + struct ba431_trng *ba431 = platform_get_drvdata(pdev); + + hwrng_unregister(&ba431->rng); + + return 0; +} + +static const struct of_device_id ba431_trng_dt_ids[] = { + { .compatible = "silex-insight,ba431-rng", .data = NULL }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids); + +static struct platform_driver ba431_trng_driver = { + .driver = { + .name = "ba431-rng", + .of_match_table = ba431_trng_dt_ids, + }, + .probe = ba431_trng_probe, + .remove = ba431_trng_remove, +}; + +module_platform_driver(ba431_trng_driver); + +MODULE_AUTHOR("Olivier Sobrie "); +MODULE_DESCRIPTION("TRNG driver for Silex Insight BA431"); +MODULE_LICENSE("GPL"); From 58e5b0157e58fc0e04a7fc806fed53893ddcc28b Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Mon, 1 Jun 2020 16:07:26 -0700 Subject: [PATCH 005/157] crypto: caam - add clock info for VFxxx SoCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a small bit of plumbing necessary to use CAAM on VFxxx SoCs. 
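For background, the driver picks its per-SoC clock list by matching the running SoC against a soc_device_attribute table and following the matched entry's data pointer; the hunk below adds exactly this kind of entry for VFxxx. A minimal sketch of that pattern, with made-up names (my_data, my_soc_table, my_get_data) rather than the driver's own, assuming only the generic soc_device_match() and clk_bulk APIs:

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>

struct my_data {
	const struct clk_bulk_data *clks;
	int num_clks;
};

static const struct clk_bulk_data my_vf610_clks[] = {
	{ .id = "ipg" },	/* bus clock, looked up by connection id */
};

static const struct my_data my_vf610_data = {
	.clks = my_vf610_clks,
	.num_clks = ARRAY_SIZE(my_vf610_clks),
};

static const struct soc_device_attribute my_soc_table[] = {
	/* soc_id is glob-matched, so "VF*" covers e.g. VF500 and VF610 */
	{ .soc_id = "VF*", .data = &my_vf610_data },
	{ /* sentinel */ }
};

static const struct my_data *my_get_data(void)
{
	const struct soc_device_attribute *match;

	match = soc_device_match(my_soc_table);
	if (!match)
		return NULL;		/* unknown SoC, nothing to enable */
	return match->data;	/* caller then does devm_clk_bulk_get() etc. */
}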
Signed-off-by: Andrey Smirnov Cc: Chris Healy Cc: Fabio Estevam Cc: linux-imx@nxp.com Cc: linux-kernel@vger.kernel.org Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index f3d20b7645e0..6e0b0ddbce31 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -527,11 +527,21 @@ static const struct caam_imx_data caam_imx6ul_data = { .num_clks = ARRAY_SIZE(caam_imx6ul_clks), }; +static const struct clk_bulk_data caam_vf610_clks[] = { + { .id = "ipg" }, +}; + +static const struct caam_imx_data caam_vf610_data = { + .clks = caam_vf610_clks, + .num_clks = ARRAY_SIZE(caam_vf610_clks), +}; + static const struct soc_device_attribute caam_imx_soc_table[] = { { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data }, { .soc_id = "i.MX6*", .data = &caam_imx6_data }, { .soc_id = "i.MX7*", .data = &caam_imx7_data }, { .soc_id = "i.MX8M*", .data = &caam_imx7_data }, + { .soc_id = "VF*", .data = &caam_vf610_data }, { .family = "Freescale i.MX" }, { /* sentinel */ } }; From 7f94adf218d8810abe4d46ef97b17070ab6dff61 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 2 Jun 2020 16:54:09 +0300 Subject: [PATCH 006/157] crypto: hisilicon - allow smaller reads in debugfs Originally this code rejected any read less than 256 bytes. There is no need for this artificial limit. We should just use the normal helper functions to read a string from the kernel. Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 33 ++++++--------------------------- 1 file changed, 6 insertions(+), 27 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 9bb263cec6c3..13ccb9e29a2e 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1064,19 +1064,10 @@ static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, char buf[QM_DBG_READ_LEN]; int len; - if (*pos) - return 0; + len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", + "Please echo help to cmd to get help information"); - if (count < QM_DBG_READ_LEN) - return -ENOSPC; - - len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", - "Please echo help to cmd to get help information"); - - if (copy_to_user(buffer, buf, len)) - return -EFAULT; - - return (*pos = len); + return simple_read_from_buffer(buffer, count, pos, buf, len); } static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, @@ -2691,24 +2682,12 @@ static ssize_t qm_status_read(struct file *filp, char __user *buffer, { struct hisi_qm *qm = filp->private_data; char buf[QM_DBG_READ_LEN]; - int val, cp_len, len; - - if (*pos) - return 0; - - if (count < QM_DBG_READ_LEN) - return -ENOSPC; + int val, len; val = atomic_read(&qm->status.flags); - len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); - if (!len) - return -EFAULT; + len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); - cp_len = copy_to_user(buffer, buf, len); - if (cp_len) - return -EFAULT; - - return (*pos = len); + return simple_read_from_buffer(buffer, count, pos, buf, len); } static const struct file_operations qm_status_fops = { From 060ce5037d5fe22fcb2102f4c3a3b942c9edbc9d Mon Sep 17 00:00:00 2001 From: Andrei Botila Date: Wed, 3 Jun 2020 11:47:04 +0300 Subject: [PATCH 007/157] crypto: caam/qi2 - add support for dpseci_reset() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for dpseci_reset() command for DPSECI objects. 
For DPSECI DPAA2 objects with a version lower than v5.4, the reset command was broken in the MC f/w. Signed-off-by: Andrei Botila Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 15 +++++++++++++++ drivers/crypto/caam/dpseci.c | 18 ++++++++++++++++++ drivers/crypto/caam/dpseci.h | 2 ++ drivers/crypto/caam/dpseci_cmd.h | 1 + 4 files changed, 36 insertions(+) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 28669cbecf77..35fbb3a74cb4 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -4697,6 +4697,13 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv) { struct device *dev = priv->dev; struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); + int err; + + if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { + err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) + dev_err(dev, "dpseci_reset() failed\n"); + } dpaa2_dpseci_congestion_free(priv); dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); @@ -4894,6 +4901,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver); + if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { + err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) { + dev_err(dev, "dpseci_reset() failed\n"); + goto err_get_vers; + } + } + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, &priv->dpseci_attr); if (err) { diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c index 8a68531ded0b..039df6c5790c 100644 --- a/drivers/crypto/caam/dpseci.c +++ b/drivers/crypto/caam/dpseci.c @@ -103,6 +103,24 @@ int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) return mc_send_command(mc_io, &cmd); } +/** + * dpseci_reset() - Reset the DPSECI, returns the object to initial state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * + * Return: '0' on success, error code otherwise + */ +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET, + cmd_flags, + token); + return mc_send_command(mc_io, &cmd); +} + /** + * dpseci_is_enabled() - Check if the DPSECI is enabled.
* @mc_io: Pointer to MC portal's I/O object diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h index 4550e134d166..6dcd9be8144b 100644 --- a/drivers/crypto/caam/dpseci.h +++ b/drivers/crypto/caam/dpseci.h @@ -59,6 +59,8 @@ int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, int *en); diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h index 6ab77ead6e3d..71a007c85adb 100644 --- a/drivers/crypto/caam/dpseci_cmd.h +++ b/drivers/crypto/caam/dpseci_cmd.h @@ -33,6 +33,7 @@ #define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002) #define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003) #define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004) +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005) #define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006) #define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194) From 2bfd22766d004102e84c0d40793e1c73d73d11a5 Mon Sep 17 00:00:00 2001 From: Wojciech Ziemba Date: Wed, 3 Jun 2020 18:33:44 +0100 Subject: [PATCH 008/157] crypto: qat - replace user types with kernel u types Kernel source code should not include stdint.h types. This patch replaces uintXX_t types with respective ones defined in kernel headers. Signed-off-by: Wojciech Ziemba Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- .../crypto/qat/qat_common/adf_accel_devices.h | 54 ++++++++-------- .../crypto/qat/qat_common/adf_accel_engine.c | 4 +- drivers/crypto/qat/qat_common/adf_aer.c | 2 +- .../crypto/qat/qat_common/adf_common_drv.h | 12 ++-- drivers/crypto/qat/qat_common/adf_ctl_drv.c | 4 +- drivers/crypto/qat/qat_common/adf_dev_mgr.c | 8 +-- drivers/crypto/qat/qat_common/adf_transport.c | 62 +++++++++---------- drivers/crypto/qat/qat_common/adf_transport.h | 4 +- .../qat_common/adf_transport_access_macros.h | 6 +- .../qat/qat_common/adf_transport_internal.h | 20 +++--- drivers/crypto/qat/qat_common/icp_qat_uclo.h | 6 +- drivers/crypto/qat/qat_common/qat_algs.c | 54 ++++++++-------- drivers/crypto/qat/qat_common/qat_asym_algs.c | 12 ++-- drivers/crypto/qat/qat_common/qat_hal.c | 40 ++++++------ drivers/crypto/qat/qat_common/qat_uclo.c | 20 +++--- .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 26 ++++---- 16 files changed, 167 insertions(+), 167 deletions(-) diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index b3500bc4a558..c1db8c26afb6 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -59,8 +59,8 @@ struct adf_accel_pci { struct pci_dev *pci_dev; struct adf_accel_msix msix_entries; struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; - uint8_t revid; - uint8_t sku; + u8 revid; + u8 sku; } __packed; enum dev_state { @@ -100,7 +100,7 @@ static inline const char *get_sku_info(enum dev_sku_info info) struct adf_hw_device_class { const char *name; const enum adf_device_type type; - uint32_t instances; + u32 instances; } __packed; struct adf_cfg_device_data; @@ -110,15 +110,15 @@ struct adf_etr_ring_data; struct adf_hw_device_data { struct adf_hw_device_class *dev_class; - uint32_t (*get_accel_mask)(uint32_t fuse); - uint32_t (*get_ae_mask)(uint32_t fuse); - uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self); - uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); - uint32_t (*get_etr_bar_id)(struct 
adf_hw_device_data *self); - uint32_t (*get_num_aes)(struct adf_hw_device_data *self); - uint32_t (*get_num_accels)(struct adf_hw_device_data *self); - uint32_t (*get_pf2vf_offset)(uint32_t i); - uint32_t (*get_vintmsk_offset)(uint32_t i); + u32 (*get_accel_mask)(u32 fuse); + u32 (*get_ae_mask)(u32 fuse); + u32 (*get_sram_bar_id)(struct adf_hw_device_data *self); + u32 (*get_misc_bar_id)(struct adf_hw_device_data *self); + u32 (*get_etr_bar_id)(struct adf_hw_device_data *self); + u32 (*get_num_aes)(struct adf_hw_device_data *self); + u32 (*get_num_accels)(struct adf_hw_device_data *self); + u32 (*get_pf2vf_offset)(u32 i); + u32 (*get_vintmsk_offset)(u32 i); enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); int (*alloc_irq)(struct adf_accel_dev *accel_dev); void (*free_irq)(struct adf_accel_dev *accel_dev); @@ -129,25 +129,25 @@ struct adf_hw_device_data { int (*init_arb)(struct adf_accel_dev *accel_dev); void (*exit_arb)(struct adf_accel_dev *accel_dev); void (*get_arb_mapping)(struct adf_accel_dev *accel_dev, - const uint32_t **cfg); + const u32 **cfg); void (*disable_iov)(struct adf_accel_dev *accel_dev); void (*enable_ints)(struct adf_accel_dev *accel_dev); int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev); void (*reset_device)(struct adf_accel_dev *accel_dev); const char *fw_name; const char *fw_mmp_name; - uint32_t fuses; - uint32_t accel_capabilities_mask; - uint32_t instance_id; - uint16_t accel_mask; - uint16_t ae_mask; - uint16_t tx_rings_mask; - uint8_t tx_rx_gap; - uint8_t num_banks; - uint8_t num_accel; - uint8_t num_logical_accel; - uint8_t num_engines; - uint8_t min_iov_compat_ver; + u32 fuses; + u32 accel_capabilities_mask; + u32 instance_id; + u16 accel_mask; + u16 ae_mask; + u16 tx_rings_mask; + u8 tx_rx_gap; + u8 num_banks; + u8 num_accel; + u8 num_logical_accel; + u8 num_engines; + u8 min_iov_compat_ver; } __packed; /* CSR write macro */ @@ -204,8 +204,8 @@ struct adf_accel_dev { struct tasklet_struct pf2vf_bh_tasklet; struct mutex vf2pf_lock; /* protect CSR access */ struct completion iov_msg_completion; - uint8_t compatible; - uint8_t pf_version; + u8 compatible; + u8 pf_version; } vf; }; bool is_vf; diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c index 00d4d13e33e8..c8ad85b882be 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c @@ -74,7 +74,7 @@ int adf_ae_start(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); if (!hw_data->fw_name) return 0; @@ -95,7 +95,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); if (!hw_data->fw_name) return 0; diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 35cdc0fbbe40..32102e27e559 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -42,7 +42,7 @@ void adf_reset_sbr(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); struct pci_dev *parent = pdev->bus->self; - uint16_t 
bridge_ctl = 0; + u16 bridge_ctl = 0; if (!parent) parent = pdev; diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index ae91203bb617..ebfcb4ea618d 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -79,11 +79,11 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); struct list_head *adf_devmgr_get_head(void); -struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); +struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id); struct adf_accel_dev *adf_devmgr_get_first(void); struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev); -int adf_devmgr_verify_id(uint32_t id); -void adf_devmgr_get_num_dev(uint32_t *num); +int adf_devmgr_verify_id(u32 id); +void adf_devmgr_get_num_dev(u32 *num); int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev); int adf_dev_started(struct adf_accel_dev *accel_dev); int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev); @@ -154,7 +154,7 @@ void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int upc); void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, - unsigned int words_num, uint64_t *uword); + unsigned int words_num, u64 *uword); void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uword_addr, unsigned int words_num, unsigned int *data); @@ -189,9 +189,9 @@ int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, - uint32_t vf_mask); + u32 vf_mask); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, - uint32_t vf_mask); + u32 vf_mask); void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 4d4589d4b64c..71d0c44aacca 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -226,7 +226,7 @@ static int adf_ctl_is_device_in_use(int id) return 0; } -static void adf_ctl_stop_devices(uint32_t id) +static void adf_ctl_stop_devices(u32 id) { struct adf_accel_dev *accel_dev; @@ -330,7 +330,7 @@ out: static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd, unsigned long arg) { - uint32_t num_devices = 0; + u32 num_devices = 0; adf_devmgr_get_num_dev(&num_devices); if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices))) diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c index e663054a11d5..72753af056b3 100644 --- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c @@ -8,7 +8,7 @@ static LIST_HEAD(accel_table); static LIST_HEAD(vfs_table); static DEFINE_MUTEX(table_lock); -static uint32_t num_devices; +static u32 num_devices; static u8 id_map[ADF_MAX_DEVICES]; struct vf_id_map { @@ -311,7 +311,7 @@ struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev) } EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); -struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) +struct adf_accel_dev 
*adf_devmgr_get_dev_by_id(u32 id) { struct list_head *itr; int real_id; @@ -336,7 +336,7 @@ unlock: return NULL; } -int adf_devmgr_verify_id(uint32_t id) +int adf_devmgr_verify_id(u32 id) { if (id == ADF_CFG_ALL_DEVICES) return 0; @@ -363,7 +363,7 @@ static int adf_get_num_dettached_vfs(void) return vfs; } -void adf_devmgr_get_num_dev(uint32_t *num) +void adf_devmgr_get_num_dev(u32 *num) { *num = num_devices - adf_get_num_dettached_vfs(); } diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 4625881a8832..2ad774017200 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -7,22 +7,22 @@ #include "adf_cfg.h" #include "adf_common_drv.h" -static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) +static inline u32 adf_modulo(u32 data, u32 shift) { - uint32_t div = data >> shift; - uint32_t mult = div << shift; + u32 div = data >> shift; + u32 mult = div << shift; return data - mult; } -static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size) +static inline int adf_check_ring_alignment(u64 addr, u64 size) { if (((size - 1) & addr) != 0) return -EFAULT; return 0; } -static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) +static int adf_verify_ring_size(u32 msg_size, u32 msg_num) { int i = ADF_MIN_RING_SIZE; @@ -33,7 +33,7 @@ static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) return ADF_DEFAULT_RING_SIZE; } -static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) +static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring) { spin_lock(&bank->lock); if (bank->ring_mask & (1 << ring)) { @@ -45,14 +45,14 @@ static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) return 0; } -static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) +static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring) { spin_lock(&bank->lock); bank->ring_mask &= ~(1 << ring); spin_unlock(&bank->lock); } -static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) +static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) { spin_lock_bh(&bank->lock); bank->irq_mask |= (1 << ring); @@ -62,7 +62,7 @@ static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) bank->irq_coalesc_timer); } -static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) +static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) { spin_lock_bh(&bank->lock); bank->irq_mask &= ~(1 << ring); @@ -70,7 +70,7 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); } -int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) +int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg) { if (atomic_add_return(1, ring->inflights) > ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) { @@ -92,18 +92,18 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) static int adf_handle_response(struct adf_etr_ring_data *ring) { - uint32_t msg_counter = 0; - uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); + u32 msg_counter = 0; + u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head); while (*msg != ADF_RING_EMPTY_SIG) { - ring->callback((uint32_t *)msg); + ring->callback((u32 *)msg); atomic_dec(ring->inflights); *msg = ADF_RING_EMPTY_SIG; ring->head = adf_modulo(ring->head + 
ADF_MSG_SIZE_TO_BYTES(ring->msg_size), ADF_RING_SIZE_MODULO(ring->ring_size)); msg_counter++; - msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); + msg = (u32 *)((uintptr_t)ring->base_addr + ring->head); } if (msg_counter > 0) WRITE_CSR_RING_HEAD(ring->bank->csr_addr, @@ -114,7 +114,7 @@ static int adf_handle_response(struct adf_etr_ring_data *ring) static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) { - uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size); + u32 ring_config = BUILD_RING_CONFIG(ring->ring_size); WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, ring->ring_number, ring_config); @@ -122,7 +122,7 @@ static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) static void adf_configure_rx_ring(struct adf_etr_ring_data *ring) { - uint32_t ring_config = + u32 ring_config = BUILD_RESP_RING_CONFIG(ring->ring_size, ADF_RING_NEAR_WATERMARK_512, ADF_RING_NEAR_WATERMARK_0); @@ -136,8 +136,8 @@ static int adf_init_ring(struct adf_etr_ring_data *ring) struct adf_etr_bank_data *bank = ring->bank; struct adf_accel_dev *accel_dev = bank->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint64_t ring_base; - uint32_t ring_size_bytes = + u64 ring_base; + u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); @@ -171,7 +171,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring) static void adf_cleanup_ring(struct adf_etr_ring_data *ring) { - uint32_t ring_size_bytes = + u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); @@ -184,8 +184,8 @@ static void adf_cleanup_ring(struct adf_etr_ring_data *ring) } int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, - uint32_t bank_num, uint32_t num_msgs, - uint32_t msg_size, const char *ring_name, + u32 bank_num, u32 num_msgs, + u32 msg_size, const char *ring_name, adf_callback_fn callback, int poll_mode, struct adf_etr_ring_data **ring_ptr) { @@ -193,7 +193,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, struct adf_etr_bank_data *bank; struct adf_etr_ring_data *ring; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; - uint32_t ring_num; + u32 ring_num; int ret; if (bank_num >= GET_MAX_BANKS(accel_dev)) { @@ -286,7 +286,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring) static void adf_ring_response_handler(struct adf_etr_bank_data *bank) { - uint32_t empty_rings, i; + u32 empty_rings, i; empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number); empty_rings = ~empty_rings & bank->irq_mask; @@ -309,7 +309,7 @@ void adf_response_handler(uintptr_t bank_addr) static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, const char *section, const char *format, - uint32_t key, uint32_t *value) + u32 key, u32 *value) { char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; @@ -326,7 +326,7 @@ static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank, const char *section, - uint32_t bank_num_in_accel) + u32 bank_num_in_accel) { if (adf_get_cfg_int(bank->accel_dev, section, ADF_ETRMGR_COALESCE_TIMER_FORMAT, @@ -340,12 +340,12 @@ static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank, static int adf_init_bank(struct adf_accel_dev *accel_dev, struct adf_etr_bank_data *bank, - uint32_t bank_num, void __iomem *csr_addr) + u32 bank_num, void __iomem 
*csr_addr) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_etr_ring_data *ring; struct adf_etr_ring_data *tx_ring; - uint32_t i, coalesc_enabled = 0; + u32 i, coalesc_enabled = 0; memset(bank, 0, sizeof(*bank)); bank->bank_number = bank_num; @@ -417,8 +417,8 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) struct adf_etr_data *etr_data; struct adf_hw_device_data *hw_data = accel_dev->hw_device; void __iomem *csr_addr; - uint32_t size; - uint32_t num_banks = 0; + u32 size; + u32 num_banks = 0; int i, ret; etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, @@ -464,7 +464,7 @@ EXPORT_SYMBOL_GPL(adf_init_etr_data); static void cleanup_bank(struct adf_etr_bank_data *bank) { - uint32_t i; + u32 i; for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { struct adf_accel_dev *accel_dev = bank->accel_dev; @@ -484,7 +484,7 @@ static void cleanup_bank(struct adf_etr_bank_data *bank) static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data = accel_dev->transport; - uint32_t i, num_banks = GET_MAX_BANKS(accel_dev); + u32 i, num_banks = GET_MAX_BANKS(accel_dev); for (i = 0; i < num_banks; i++) cleanup_bank(&etr_data->banks[i]); diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h index 0faea1032b93..2c95f1697c76 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.h +++ b/drivers/crypto/qat/qat_common/adf_transport.h @@ -10,10 +10,10 @@ struct adf_etr_ring_data; typedef void (*adf_callback_fn)(void *resp_msg); int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, - uint32_t bank_num, uint32_t num_mgs, uint32_t msg_size, + u32 bank_num, u32 num_mgs, u32 msg_size, const char *ring_name, adf_callback_fn callback, int poll_mode, struct adf_etr_ring_data **ring_ptr); -int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg); +int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg); void adf_remove_ring(struct adf_etr_ring_data *ring); #endif diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h index 1a943f948100..950d1988556c 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h @@ -88,9 +88,9 @@ ADF_RING_CSR_RING_CONFIG + (ring << 2), value) #define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ do { \ - uint32_t l_base = 0, u_base = 0; \ - l_base = (uint32_t)(value & 0xFFFFFFFF); \ - u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \ + u32 l_base = 0, u_base = 0; \ + l_base = (u32)(value & 0xFFFFFFFF); \ + u_base = (u32)((value & 0xFFFFFFFF00000000ULL) >> 32); \ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index fc610ea31869..df4c7195daae 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -19,12 +19,12 @@ struct adf_etr_ring_data { adf_callback_fn callback; struct adf_etr_bank_data *bank; dma_addr_t dma_addr; - uint16_t head; - uint16_t tail; - uint8_t ring_number; - uint8_t ring_size; - uint8_t msg_size; - uint8_t reserved; + u16 head; + u16 tail; + u8 ring_number; + u8 ring_size; + u8 msg_size; + u8 reserved; struct 
adf_etr_ring_debug_entry *ring_debug; } __packed; @@ -33,13 +33,13 @@ struct adf_etr_bank_data { struct tasklet_struct resp_handler; void __iomem *csr_addr; struct adf_accel_dev *accel_dev; - uint32_t irq_coalesc_timer; - uint16_t ring_mask; - uint16_t irq_mask; + u32 irq_coalesc_timer; + u16 ring_mask; + u16 irq_mask; spinlock_t lock; /* protects bank data struct */ struct dentry *bank_debug_dir; struct dentry *bank_debug_cfg; - uint32_t bank_number; + u32 bank_number; } __packed; struct adf_etr_data { diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index dbb58eacc395..8fe1ec344fa2 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -132,7 +132,7 @@ struct icp_qat_uof_encap_obj { struct icp_qat_uclo_encap_uwblock { unsigned int start_addr; unsigned int words_num; - uint64_t micro_words; + u64 micro_words; }; struct icp_qat_uclo_encap_page { @@ -171,7 +171,7 @@ struct icp_qat_uclo_objhdr { struct icp_qat_uof_strtable { unsigned int table_len; unsigned int reserved; - uint64_t strings; + u64 strings; }; struct icp_qat_uclo_objhandle { @@ -191,7 +191,7 @@ struct icp_qat_uclo_objhandle { unsigned int ae_num; unsigned int ustore_phy_size; void *obj_buf; - uint64_t *uword_buf; + u64 *uword_buf; }; struct icp_qat_uof_uword_block { diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 9df4aebf37ca..833d1a75666f 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -34,15 +34,15 @@ static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; struct qat_alg_buf { - uint32_t len; - uint32_t resrvd; - uint64_t addr; + u32 len; + u32 resrvd; + u64 addr; } __packed; struct qat_alg_buf_list { - uint64_t resrvd; - uint32_t num_bufs; - uint32_t num_mapped_bufs; + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; struct qat_alg_buf bufers[]; } __packed __aligned(64); @@ -107,7 +107,7 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, struct qat_alg_aead_ctx *ctx, - const uint8_t *auth_key, + const u8 *auth_key, unsigned int auth_keylen) { SHASH_DESC_ON_STACK(shash, ctx->hash_tfm); @@ -423,7 +423,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm, static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx, struct icp_qat_fw_la_bulk_req *req, struct icp_qat_hw_cipher_algo_blk *cd, - const uint8_t *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr; @@ -443,7 +443,7 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx, } static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx, - int alg, const uint8_t *key, + int alg, const u8 *key, unsigned int keylen, int mode) { struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd; @@ -456,7 +456,7 @@ static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx, } static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx, - int alg, const uint8_t *key, + int alg, const u8 *key, unsigned int keylen, int mode) { struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd; @@ -534,7 +534,7 @@ error: } static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx, - const uint8_t *key, + const u8 *key, unsigned int keylen, 
int mode) { @@ -548,7 +548,7 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx, return 0; } -static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key, +static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -562,7 +562,7 @@ static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key, ICP_QAT_HW_CIPHER_CBC_MODE); } -static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key, +static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -614,7 +614,7 @@ out_free_inst: return ret; } -static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, +static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -776,7 +776,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp, struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx; struct qat_crypto_instance *inst = ctx->inst; struct aead_request *areq = qat_req->aead_req; - uint8_t stat_filed = qat_resp->comn_resp.comn_status; + u8 stat_filed = qat_resp->comn_resp.comn_status; int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); qat_alg_free_bufl(inst, qat_req); @@ -791,7 +791,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx; struct qat_crypto_instance *inst = ctx->inst; struct skcipher_request *sreq = qat_req->skcipher_req; - uint8_t stat_filed = qat_resp->comn_resp.comn_status; + u8 stat_filed = qat_resp->comn_resp.comn_status; struct device *dev = &GET_DEV(ctx->inst->accel_dev); int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); @@ -836,18 +836,18 @@ static int qat_alg_aead_dec(struct aead_request *areq) qat_req->aead_ctx = ctx; qat_req->aead_req = areq; qat_req->cb = qat_aead_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; cipher_param->cipher_length = areq->cryptlen - digst_size; cipher_param->cipher_offset = areq->assoclen; memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); auth_param->auth_off = 0; auth_param->auth_len = areq->assoclen + cipher_param->cipher_length; do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -866,7 +866,7 @@ static int qat_alg_aead_enc(struct aead_request *areq) struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_auth_req_params *auth_param; struct icp_qat_fw_la_bulk_req *msg; - uint8_t *iv = areq->iv; + u8 *iv = areq->iv; int ret, ctr = 0; ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); @@ -878,11 +878,11 @@ static int qat_alg_aead_enc(struct aead_request *areq) qat_req->aead_ctx = ctx; qat_req->aead_req = areq; qat_req->cb = qat_aead_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force 
long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); cipher_param->cipher_length = areq->cryptlen; @@ -892,7 +892,7 @@ static int qat_alg_aead_enc(struct aead_request *areq) auth_param->auth_len = areq->assoclen + areq->cryptlen; do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -1029,7 +1029,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) qat_req->skcipher_ctx = ctx; qat_req->skcipher_req = req; qat_req->cb = qat_skcipher_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; @@ -1038,7 +1038,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE); do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -1089,7 +1089,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) qat_req->skcipher_ctx = ctx; qat_req->skcipher_req = req; qat_req->cb = qat_skcipher_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; @@ -1098,7 +1098,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE); do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 6cd67b06979b..846569ec9066 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -339,12 +339,12 @@ static int qat_dh_compute_value(struct kpp_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; + msg->pke_mid.opaque = (u64)(__force long)qat_req; msg->input_param_count = n_input_params; msg->output_param_count = 1; do { - ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) @@ -734,11 +734,11 @@ static int qat_rsa_enc(struct akcipher_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force 
long)qat_req; + msg->pke_mid.opaque = (u64)(__force long)qat_req; msg->input_param_count = 3; msg->output_param_count = 1; do { - ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) @@ -882,7 +882,7 @@ static int qat_rsa_dec(struct akcipher_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; + msg->pke_mid.opaque = (u64)(__force long)qat_req; if (ctx->crt_mode) msg->input_param_count = 6; else @@ -890,7 +890,7 @@ static int qat_rsa_dec(struct akcipher_request *req) msg->output_param_count = 1; do { - ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 9a80da0b3b28..fa467e0f8285 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -34,13 +34,13 @@ #define AE(handle, ae) handle->hal_handle->aes[ae] -static const uint64_t inst_4b[] = { +static const u64 inst_4b[] = { 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull, 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0A021000000ull }; -static const uint64_t inst[] = { +static const u64 inst[] = { 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull, @@ -502,7 +502,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle, qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); } -static uint64_t qat_hal_parity_64bit(uint64_t word) +static u64 qat_hal_parity_64bit(u64 word) { word ^= word >> 1; word ^= word >> 2; @@ -513,9 +513,9 @@ static uint64_t qat_hal_parity_64bit(uint64_t word) return word & 1; } -static uint64_t qat_hal_set_uword_ecc(uint64_t uword) +static u64 qat_hal_set_uword_ecc(u64 uword) { - uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, + u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL, bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL, bit6_mask = 0xdaf69a46910ULL; @@ -534,7 +534,7 @@ static uint64_t qat_hal_set_uword_ecc(uint64_t uword) void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, - unsigned int words_num, uint64_t *uword) + unsigned int words_num, u64 *uword) { unsigned int ustore_addr; unsigned int i; @@ -544,7 +544,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); for (i = 0; i < words_num; i++) { unsigned int uwrd_lo, uwrd_hi; - uint64_t tmp; + u64 tmp; tmp = qat_hal_set_uword_ecc(uword[i]); uwrd_lo = (unsigned int)(tmp & 0xffffffff); @@ -600,7 +600,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) csr_val |= CE_NN_MODE; qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst), - (uint64_t *)inst); + (u64 *)inst); qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, handle->hal_handle->upc_mask & INIT_PC_VALUE); @@ -777,7 +777,7 @@ void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, static void 
qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, - unsigned int words_num, uint64_t *uword) + unsigned int words_num, u64 *uword) { unsigned int i, uwrd_lo, uwrd_hi; unsigned int ustore_addr, misc_control; @@ -827,11 +827,11 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, #define MAX_EXEC_INST 100 static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, - uint64_t *micro_inst, unsigned int inst_num, + u64 *micro_inst, unsigned int inst_num, int code_off, unsigned int max_cycle, unsigned int *endpc) { - uint64_t savuwords[MAX_EXEC_INST]; + u64 savuwords[MAX_EXEC_INST]; unsigned int ind_lm_addr0, ind_lm_addr1; unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1; unsigned int ind_cnt_sig; @@ -928,7 +928,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, unsigned int ctxarb_cntl, ustore_addr, ctx_enables; unsigned short reg_addr; int status = 0; - uint64_t insts, savuword; + u64 insts, savuword; reg_addr = qat_hal_get_reg_addr(reg_type, reg_num); if (reg_addr == BAD_REGADDR) { @@ -940,7 +940,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, insts = 0xA070000000ull | (reg_addr & 0x3ff); break; default: - insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); + insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10); break; } savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); @@ -986,7 +986,7 @@ static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle, unsigned short reg_num, unsigned int data) { unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo; - uint64_t insts[] = { + u64 insts[] = { 0x0F440000000ull, 0x0F040000000ull, 0x0F0000C0300ull, @@ -1032,13 +1032,13 @@ int qat_hal_get_ins_num(void) return ARRAY_SIZE(inst_4b); } -static int qat_hal_concat_micro_code(uint64_t *micro_inst, +static int qat_hal_concat_micro_code(u64 *micro_inst, unsigned int inst_num, unsigned int size, unsigned int addr, unsigned int *value) { int i; unsigned int cur_value; - const uint64_t *inst_arr; + const u64 *inst_arr; int fixup_offset; int usize = 0; int orig_num; @@ -1063,7 +1063,7 @@ static int qat_hal_concat_micro_code(uint64_t *micro_inst, static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, - int *pfirst_exec, uint64_t *micro_inst, + int *pfirst_exec, u64 *micro_inst, unsigned int inst_num) { int stat = 0; @@ -1096,7 +1096,7 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_batch_init *lm_init_header) { struct icp_qat_uof_batch_init *plm_init; - uint64_t *micro_inst_arry; + u64 *micro_inst_arry; int micro_inst_num; int alloc_inst_size; int first_exec = 1; @@ -1106,7 +1106,7 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, alloc_inst_size = lm_init_header->size; if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore) alloc_inst_size = handle->hal_handle->max_ustore; - micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t), + micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64), GFP_KERNEL); if (!micro_inst_arry) return -ENOMEM; @@ -1185,7 +1185,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, data16low; unsigned short reg_mask; int status = 0; - uint64_t micro_inst[] = { + u64 micro_inst[] = { 0x0F440000000ull, 0x0F040000000ull, 0x0A000000000ull, diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c 
b/drivers/crypto/qat/qat_common/qat_uclo.c index 3f079e336f64..4cc1f436b075 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -367,16 +367,16 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, unsigned int ustore_size; unsigned int patt_pos; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; - uint64_t *fill_data; + u64 *fill_data; uof_image = image->img_ptr; - fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t), + fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64), GFP_KERNEL); if (!fill_data) return -ENOMEM; for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++) memcpy(&fill_data[i], &uof_image->fill_pattern, - sizeof(uint64_t)); + sizeof(u64)); page = image->page; for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { @@ -937,7 +937,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) pr_err("QAT: UOF incompatible\n"); return -EINVAL; } - obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), + obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64), GFP_KERNEL); if (!obj_handle->uword_buf) return -ENOMEM; @@ -1141,7 +1141,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, return 0; } -#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low) +#define ADD_ADDR(high, low) ((((u64)high) << 32) + low) #define BITS_IN_DWORD 32 static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, @@ -1470,10 +1470,10 @@ void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, struct icp_qat_uclo_encap_page *encap_page, - uint64_t *uword, unsigned int addr_p, - unsigned int raddr, uint64_t fill) + u64 *uword, unsigned int addr_p, + unsigned int raddr, u64 fill) { - uint64_t uwrd = 0; + u64 uwrd = 0; unsigned int i; if (!encap_page) { @@ -1503,12 +1503,12 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, { unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; - uint64_t fill_pat; + u64 fill_pat; /* load the page starting at appropriate ustore address */ /* get fill-pattern from an image -- they are all the same */ memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, - sizeof(uint64_t)); + sizeof(u64)); uw_physical_addr = encap_page->beg_addr_p; uw_relative_addr = 0; words_num = encap_page->micro_words_num; diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 0461040303a2..b975c263446d 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -6,13 +6,13 @@ #include "adf_dh895xcc_hw_data.h" /* Worker thread to service arbiter mappings based on dev SKUs */ -static const uint32_t thrd_to_arb_map_sku4[] = { +static const u32 thrd_to_arb_map_sku4[] = { 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; -static const uint32_t thrd_to_arb_map_sku6[] = { +static const u32 thrd_to_arb_map_sku6[] = { 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222 @@ -24,20 +24,20 @@ static struct adf_hw_device_class dh895xcc_class = { .instances = 0 }; -static uint32_t get_accel_mask(uint32_t fuse) 
+static u32 get_accel_mask(u32 fuse) { return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & ADF_DH895XCC_ACCELERATORS_MASK; } -static uint32_t get_ae_mask(uint32_t fuse) +static u32 get_ae_mask(u32 fuse) { return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK; } -static uint32_t get_num_accels(struct adf_hw_device_data *self) +static u32 get_num_accels(struct adf_hw_device_data *self) { - uint32_t i, ctr = 0; + u32 i, ctr = 0; if (!self || !self->accel_mask) return 0; @@ -49,9 +49,9 @@ static uint32_t get_num_accels(struct adf_hw_device_data *self) return ctr; } -static uint32_t get_num_aes(struct adf_hw_device_data *self) +static u32 get_num_aes(struct adf_hw_device_data *self) { - uint32_t i, ctr = 0; + u32 i, ctr = 0; if (!self || !self->ae_mask) return 0; @@ -63,17 +63,17 @@ static uint32_t get_num_aes(struct adf_hw_device_data *self) -static uint32_t get_misc_bar_id(struct adf_hw_device_data *self) +static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_PMISC_BAR; } -static uint32_t get_etr_bar_id(struct adf_hw_device_data *self) +static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_ETR_BAR; } -static uint32_t get_sram_bar_id(struct adf_hw_device_data *self) +static u32 get_sram_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_SRAM_BAR; } @@ -117,12 +117,12 @@ static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, } } -static uint32_t get_pf2vf_offset(uint32_t i) +static u32 get_pf2vf_offset(u32 i) { return ADF_DH895XCC_PF2VF_OFFSET(i); } -static uint32_t get_vintmsk_offset(uint32_t i) +static u32 get_vintmsk_offset(u32 i) { return ADF_DH895XCC_VINTMSK_OFFSET(i); } From 59c14e5e0174a420628356ef6ef07c414c62fdd7 Mon Sep 17 00:00:00 2001 From: Wojciech Ziemba Date: Wed, 3 Jun 2020 18:33:45 +0100 Subject: [PATCH 009/157] crypto: qat - replace user types with kernel ABI __u types Kernel source code should not contain stdint.h types. This patch replaces uintXX_t types with kernel ABI __uXX types.
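To illustrate the convention (a hypothetical sketch, not code from this patch): plain uXX types are for kernel-internal data, as in the previous patch, while structures whose layout is visible outside the kernel, such as the ioctl structures touched here, use the __uXX ABI types; <linux/types.h> provides both families.

	/* Illustrative sketch only -- these names are made up. */
	#include <linux/types.h>

	/* Kernel-internal state: plain kernel types are appropriate. */
	struct example_ring_state {
		u32 head;
		u32 tail;
		u8 ring_number;
	};

	/* Layout shared with user space (e.g. via an ioctl): ABI types. */
	struct example_dev_query {
		__u32 accel_id;
		__u8 state;
		__u8 reserved[3];	/* explicit padding keeps the layout stable */
	};
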
Signed-off-by: Wojciech Ziemba Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- .../crypto/qat/qat_common/adf_cfg_common.h | 24 +-- drivers/crypto/qat/qat_common/adf_cfg_user.h | 10 +- drivers/crypto/qat/qat_common/icp_qat_fw.h | 58 +++---- .../qat/qat_common/icp_qat_fw_init_admin.h | 46 ++--- drivers/crypto/qat/qat_common/icp_qat_fw_la.h | 158 +++++++++--------- .../crypto/qat/qat_common/icp_qat_fw_pke.h | 52 +++--- drivers/crypto/qat/qat_common/icp_qat_hw.h | 16 +- 7 files changed, 182 insertions(+), 182 deletions(-) diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index e818033c1012..1ef46ccfba47 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -37,16 +37,16 @@ enum adf_device_type { struct adf_dev_status_info { enum adf_device_type type; - u32 accel_id; - u32 instance_id; - uint8_t num_ae; - uint8_t num_accel; - uint8_t num_logical_accel; - uint8_t banks_per_accel; - uint8_t state; - uint8_t bus; - uint8_t dev; - uint8_t fun; + __u32 accel_id; + __u32 instance_id; + __u8 num_ae; + __u8 num_accel; + __u8 num_logical_accel; + __u8 banks_per_accel; + __u8 state; + __u8 bus; + __u8 dev; + __u8 fun; char name[MAX_DEVICE_NAME_SIZE]; }; @@ -57,6 +57,6 @@ struct adf_dev_status_info { struct adf_user_cfg_ctl_data) #define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \ struct adf_user_cfg_ctl_data) -#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t) -#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t) +#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, __u32) +#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32) #endif diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h index 86b4a3e3f355..421f4fb8b4dd 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_user.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h @@ -11,7 +11,7 @@ struct adf_user_cfg_key_val { char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; union { struct adf_user_cfg_key_val *next; - uint64_t padding3; + __u64 padding3; }; enum adf_cfg_val_type type; } __packed; @@ -20,19 +20,19 @@ struct adf_user_cfg_section { char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; union { struct adf_user_cfg_key_val *params; - uint64_t padding1; + __u64 padding1; }; union { struct adf_user_cfg_section *next; - uint64_t padding3; + __u64 padding3; }; } __packed; struct adf_user_cfg_ctl_data { union { struct adf_user_cfg_section *config_section; - uint64_t padding; + __u64 padding; }; - uint8_t device_id; + __u8 device_id; } __packed; #endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h index 1b9a11f574ee..6dc09d270082 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h @@ -45,41 +45,41 @@ enum icp_qat_fw_comn_request_id { struct icp_qat_fw_comn_req_hdr_cd_pars { union { struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; + __u64 content_desc_addr; + __u16 content_desc_resrvd1; + __u8 content_desc_params_sz; + __u8 content_desc_hdr_resrvd2; + __u32 content_desc_resrvd3; } s; struct { - uint32_t serv_specif_fields[4]; + __u32 serv_specif_fields[4]; } s1; } u; }; struct icp_qat_fw_comn_req_mid { - uint64_t opaque_data; - uint64_t src_data_addr; - uint64_t dest_data_addr; - uint32_t src_length; - 
uint32_t dst_length; + __u64 opaque_data; + __u64 src_data_addr; + __u64 dest_data_addr; + __u32 src_length; + __u32 dst_length; }; struct icp_qat_fw_comn_req_cd_ctrl { - uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; + __u32 content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; }; struct icp_qat_fw_comn_req_hdr { - uint8_t resrvd1; - uint8_t service_cmd_id; - uint8_t service_type; - uint8_t hdr_flags; - uint16_t serv_specif_flags; - uint16_t comn_req_flags; + __u8 resrvd1; + __u8 service_cmd_id; + __u8 service_type; + __u8 hdr_flags; + __u16 serv_specif_flags; + __u16 comn_req_flags; }; struct icp_qat_fw_comn_req_rqpars { - uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; + __u32 serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; }; struct icp_qat_fw_comn_req { @@ -91,24 +91,24 @@ struct icp_qat_fw_comn_req { }; struct icp_qat_fw_comn_error { - uint8_t xlat_err_code; - uint8_t cmp_err_code; + __u8 xlat_err_code; + __u8 cmp_err_code; }; struct icp_qat_fw_comn_resp_hdr { - uint8_t resrvd1; - uint8_t service_id; - uint8_t response_type; - uint8_t hdr_flags; + __u8 resrvd1; + __u8 service_id; + __u8 response_type; + __u8 hdr_flags; struct icp_qat_fw_comn_error comn_error; - uint8_t comn_status; - uint8_t cmd_id; + __u8 comn_status; + __u8 cmd_id; }; struct icp_qat_fw_comn_resp { struct icp_qat_fw_comn_resp_hdr comn_hdr; - uint64_t opaque_data; - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u64 opaque_data; + __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; }; #define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h index 2024babd7fc7..c867e9a0a540 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h @@ -23,35 +23,35 @@ enum icp_qat_fw_init_admin_resp_status { }; struct icp_qat_fw_init_admin_req { - uint16_t init_cfg_sz; - uint8_t resrvd1; - uint8_t init_admin_cmd_id; - uint32_t resrvd2; - uint64_t opaque_data; - uint64_t init_cfg_ptr; - uint64_t resrvd3; + __u16 init_cfg_sz; + __u8 resrvd1; + __u8 init_admin_cmd_id; + __u32 resrvd2; + __u64 opaque_data; + __u64 init_cfg_ptr; + __u64 resrvd3; }; struct icp_qat_fw_init_admin_resp_hdr { - uint8_t flags; - uint8_t resrvd1; - uint8_t status; - uint8_t init_admin_cmd_id; + __u8 flags; + __u8 resrvd1; + __u8 status; + __u8 init_admin_cmd_id; }; struct icp_qat_fw_init_admin_resp_pars { union { - uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4]; struct { - uint32_t version_patch_num; - uint8_t context_id; - uint8_t ae_id; - uint16_t resrvd1; - uint64_t resrvd2; + __u32 version_patch_num; + __u8 context_id; + __u8 ae_id; + __u16 resrvd1; + __u64 resrvd2; } s1; struct { - uint64_t req_rec_count; - uint64_t resp_sent_count; + __u64 req_rec_count; + __u64 resp_sent_count; } s2; } u; }; @@ -59,13 +59,13 @@ struct icp_qat_fw_init_admin_resp_pars { struct icp_qat_fw_init_admin_resp { struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr; union { - uint32_t resrvd2; + __u32 resrvd2; struct { - uint16_t version_minor_num; - uint16_t version_major_num; + __u16 version_minor_num; + __u16 version_major_num; } s; } u; - uint64_t opaque_data; + __u64 opaque_data; struct icp_qat_fw_init_admin_resp_pars init_resp_pars; }; diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h index ee0a0c944b31..6757ec09d81f 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h +++ 
b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h @@ -182,14 +182,14 @@ struct icp_qat_fw_la_bulk_req { struct icp_qat_fw_cipher_req_hdr_cd_pars { union { struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; + __u64 content_desc_addr; + __u16 content_desc_resrvd1; + __u8 content_desc_params_sz; + __u8 content_desc_hdr_resrvd2; + __u32 content_desc_resrvd3; } s; struct { - uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; } s1; } u; }; @@ -197,70 +197,70 @@ struct icp_qat_fw_cipher_req_hdr_cd_pars { struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { union { struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; + __u64 content_desc_addr; + __u16 content_desc_resrvd1; + __u8 content_desc_params_sz; + __u8 content_desc_hdr_resrvd2; + __u32 content_desc_resrvd3; } s; struct { - uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; } sl; } u; }; struct icp_qat_fw_cipher_cd_ctrl_hdr { - uint8_t cipher_state_sz; - uint8_t cipher_key_sz; - uint8_t cipher_cfg_offset; - uint8_t next_curr_id; - uint8_t cipher_padding_sz; - uint8_t resrvd1; - uint16_t resrvd2; - uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; + __u8 cipher_state_sz; + __u8 cipher_key_sz; + __u8 cipher_cfg_offset; + __u8 next_curr_id; + __u8 cipher_padding_sz; + __u8 resrvd1; + __u16 resrvd2; + __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; }; struct icp_qat_fw_auth_cd_ctrl_hdr { - uint32_t resrvd1; - uint8_t resrvd2; - uint8_t hash_flags; - uint8_t hash_cfg_offset; - uint8_t next_curr_id; - uint8_t resrvd3; - uint8_t outer_prefix_sz; - uint8_t final_sz; - uint8_t inner_res_sz; - uint8_t resrvd4; - uint8_t inner_state1_sz; - uint8_t inner_state2_offset; - uint8_t inner_state2_sz; - uint8_t outer_config_offset; - uint8_t outer_state1_sz; - uint8_t outer_res_sz; - uint8_t outer_prefix_offset; + __u32 resrvd1; + __u8 resrvd2; + __u8 hash_flags; + __u8 hash_cfg_offset; + __u8 next_curr_id; + __u8 resrvd3; + __u8 outer_prefix_sz; + __u8 final_sz; + __u8 inner_res_sz; + __u8 resrvd4; + __u8 inner_state1_sz; + __u8 inner_state2_offset; + __u8 inner_state2_sz; + __u8 outer_config_offset; + __u8 outer_state1_sz; + __u8 outer_res_sz; + __u8 outer_prefix_offset; }; struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { - uint8_t cipher_state_sz; - uint8_t cipher_key_sz; - uint8_t cipher_cfg_offset; - uint8_t next_curr_id_cipher; - uint8_t cipher_padding_sz; - uint8_t hash_flags; - uint8_t hash_cfg_offset; - uint8_t next_curr_id_auth; - uint8_t resrvd1; - uint8_t outer_prefix_sz; - uint8_t final_sz; - uint8_t inner_res_sz; - uint8_t resrvd2; - uint8_t inner_state1_sz; - uint8_t inner_state2_offset; - uint8_t inner_state2_sz; - uint8_t outer_config_offset; - uint8_t outer_state1_sz; - uint8_t outer_res_sz; - uint8_t outer_prefix_offset; + __u8 cipher_state_sz; + __u8 cipher_key_sz; + __u8 cipher_cfg_offset; + __u8 next_curr_id_cipher; + __u8 cipher_padding_sz; + __u8 hash_flags; + __u8 hash_cfg_offset; + __u8 next_curr_id_auth; + __u8 resrvd1; + __u8 outer_prefix_sz; + __u8 final_sz; + __u8 inner_res_sz; + __u8 resrvd2; + __u8 inner_state1_sz; + __u8 inner_state2_offset; + __u8 inner_state2_sz; + __u8 outer_config_offset; + __u8 outer_state1_sz; + __u8 outer_res_sz; + __u8 outer_prefix_offset; }; #define 
ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 @@ -271,48 +271,48 @@ struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { #define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) struct icp_qat_fw_la_cipher_req_params { - uint32_t cipher_offset; - uint32_t cipher_length; + __u32 cipher_offset; + __u32 cipher_length; union { - uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u32 cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; struct { - uint64_t cipher_IV_ptr; - uint64_t resrvd1; + __u64 cipher_IV_ptr; + __u64 resrvd1; } s; } u; }; struct icp_qat_fw_la_auth_req_params { - uint32_t auth_off; - uint32_t auth_len; + __u32 auth_off; + __u32 auth_len; union { - uint64_t auth_partial_st_prefix; - uint64_t aad_adr; + __u64 auth_partial_st_prefix; + __u64 aad_adr; } u1; - uint64_t auth_res_addr; + __u64 auth_res_addr; union { - uint8_t inner_prefix_sz; - uint8_t aad_sz; + __u8 inner_prefix_sz; + __u8 aad_sz; } u2; - uint8_t resrvd1; - uint8_t hash_state_sz; - uint8_t auth_res_sz; + __u8 resrvd1; + __u8 hash_state_sz; + __u8 auth_res_sz; } __packed; struct icp_qat_fw_la_auth_req_params_resrvd_flds { - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; + __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; union { - uint8_t inner_prefix_sz; - uint8_t aad_sz; + __u8 inner_prefix_sz; + __u8 aad_sz; } u2; - uint8_t resrvd1; - uint16_t resrvd2; + __u8 resrvd1; + __u16 resrvd2; }; struct icp_qat_fw_la_resp { struct icp_qat_fw_comn_resp_hdr comn_resp; - uint64_t opaque_data; - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u64 opaque_data; + __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; }; #define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h index 4354a68de327..9dddae0009fc 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h @@ -6,51 +6,51 @@ #include "icp_qat_fw.h" struct icp_qat_fw_req_hdr_pke_cd_pars { - u64 content_desc_addr; - u32 content_desc_resrvd; - u32 func_id; + __u64 content_desc_addr; + __u32 content_desc_resrvd; + __u32 func_id; }; struct icp_qat_fw_req_pke_mid { - u64 opaque; - u64 src_data_addr; - u64 dest_data_addr; + __u64 opaque; + __u64 src_data_addr; + __u64 dest_data_addr; }; struct icp_qat_fw_req_pke_hdr { - u8 resrvd1; - u8 resrvd2; - u8 service_type; - u8 hdr_flags; - u16 comn_req_flags; - u16 resrvd4; + __u8 resrvd1; + __u8 resrvd2; + __u8 service_type; + __u8 hdr_flags; + __u16 comn_req_flags; + __u16 resrvd4; struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars; }; struct icp_qat_fw_pke_request { struct icp_qat_fw_req_pke_hdr pke_hdr; struct icp_qat_fw_req_pke_mid pke_mid; - u8 output_param_count; - u8 input_param_count; - u16 resrvd1; - u32 resrvd2; - u64 next_req_adr; + __u8 output_param_count; + __u8 input_param_count; + __u16 resrvd1; + __u32 resrvd2; + __u64 next_req_adr; }; struct icp_qat_fw_resp_pke_hdr { - u8 resrvd1; - u8 resrvd2; - u8 response_type; - u8 hdr_flags; - u16 comn_resp_flags; - u16 resrvd4; + __u8 resrvd1; + __u8 resrvd2; + __u8 response_type; + __u8 hdr_flags; + __u16 comn_resp_flags; + __u16 resrvd4; }; struct icp_qat_fw_pke_resp { struct icp_qat_fw_resp_pke_hdr pke_resp_hdr; - u64 opaque; - u64 src_data_addr; - u64 dest_data_addr; + __u64 opaque; + __u64 src_data_addr; + __u64 dest_data_addr; }; #define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7 diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h index 1f1e7465f3c1..c4b6ef1506ab 100644 --- 
a/drivers/crypto/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h @@ -61,8 +61,8 @@ enum icp_qat_hw_auth_mode { }; struct icp_qat_hw_auth_config { - uint32_t config; - uint32_t reserved; + __u32 config; + __u32 reserved; }; #define QAT_AUTH_MODE_BITPOS 4 @@ -87,7 +87,7 @@ struct icp_qat_hw_auth_config { struct icp_qat_hw_auth_counter { __be32 counter; - uint32_t reserved; + __u32 reserved; }; #define QAT_AUTH_COUNT_MASK 0xFFFFFFFF @@ -147,9 +147,9 @@ struct icp_qat_hw_auth_setup { struct icp_qat_hw_auth_sha512 { struct icp_qat_hw_auth_setup inner_setup; - uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; + __u8 state1[ICP_QAT_HW_SHA512_STATE1_SZ]; struct icp_qat_hw_auth_setup outer_setup; - uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; + __u8 state2[ICP_QAT_HW_SHA512_STATE2_SZ]; }; struct icp_qat_hw_auth_algo_blk { @@ -183,8 +183,8 @@ enum icp_qat_hw_cipher_mode { }; struct icp_qat_hw_cipher_config { - uint32_t val; - uint32_t reserved; + __u32 val; + __u32 reserved; }; enum icp_qat_hw_cipher_dir { @@ -252,7 +252,7 @@ enum icp_qat_hw_cipher_convert { struct icp_qat_hw_cipher_aes256_f8 { struct icp_qat_hw_cipher_config cipher_config; - uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ]; + __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ]; }; struct icp_qat_hw_cipher_algo_blk { From 07b048f41ac1222a781573ac1c18592401e4a2a3 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Wed, 3 Jun 2020 18:33:46 +0100 Subject: [PATCH 010/157] crypto: qat - remove packed attribute in etr structs Remove packed attribute in adf_etr_bank_data and adf_etr_ring_data. Fields in these structures are reordered in order to avoid holes. Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- .../crypto/qat/qat_common/adf_transport_internal.h | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index df4c7195daae..c7faf4e2d302 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -15,32 +15,31 @@ struct adf_etr_ring_debug_entry { struct adf_etr_ring_data { void *base_addr; atomic_t *inflights; - spinlock_t lock; /* protects ring data struct */ adf_callback_fn callback; struct adf_etr_bank_data *bank; dma_addr_t dma_addr; + struct adf_etr_ring_debug_entry *ring_debug; + spinlock_t lock; /* protects ring data struct */ u16 head; u16 tail; u8 ring_number; u8 ring_size; u8 msg_size; - u8 reserved; - struct adf_etr_ring_debug_entry *ring_debug; -} __packed; +}; struct adf_etr_bank_data { struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK]; struct tasklet_struct resp_handler; void __iomem *csr_addr; - struct adf_accel_dev *accel_dev; u32 irq_coalesc_timer; + u32 bank_number; u16 ring_mask; u16 irq_mask; spinlock_t lock; /* protects bank data struct */ + struct adf_accel_dev *accel_dev; struct dentry *bank_debug_dir; struct dentry *bank_debug_cfg; - u32 bank_number; -} __packed; +}; struct adf_etr_data { struct adf_etr_bank_data *banks; From 3906f640224dbe7714b52b66d7d68c0812808e19 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 5 Jun 2020 16:59:18 +1000 Subject: [PATCH 011/157] crc-t10dif: Fix potential crypto notify dead-lock The crypto notify call occurs with a read mutex held so you must not do any substantial work directly. In particular, you cannot call crypto_alloc_* as they may trigger further notifications which may dead-lock in the presence of another writer. 
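The safe pattern, sketched below with illustrative names (a simplification, not the patch itself), is for the notifier to do nothing but schedule deferred work, leaving any allocation to process context:

	#include <linux/notifier.h>
	#include <linux/workqueue.h>

	static void example_rehash(struct work_struct *work)
	{
		/* Runs in process context with no notifier lock held, so it
		 * is safe to call crypto_alloc_shash() here and swap in the
		 * result under the module's own mutex. */
	}

	static DECLARE_WORK(example_rehash_work, example_rehash);

	static int example_notify(struct notifier_block *self,
				  unsigned long val, void *data)
	{
		/* Called under the notifier chain's read lock: allocate
		 * nothing here, just kick the work item and return. */
		schedule_work(&example_rehash_work);
		return 0;
	}
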
This patch fixes this by postponing the work into a work queue and taking the same lock in the module init function. While we're at it this patch also ensures that all RCU accesses are marked appropriately (tested with sparse). Finally this also reveals a race condition in module param show function as it may be called prior to the module init function. It's fixed by testing whether crct10dif_tfm is NULL (this is true iff the init function has not completed assuming fallback is false). Fixes: 11dcb1037f40 ("crc-t10dif: Allow current transform to be...") Fixes: b76377543b73 ("crc-t10dif: Pick better transform if one...") Signed-off-by: Herbert Xu Reviewed-by: Martin K. Petersen Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- lib/crc-t10dif.c | 54 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index 8cc01a603416..c9acf1c12cfc 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -19,39 +19,46 @@ static struct crypto_shash __rcu *crct10dif_tfm; static struct static_key crct10dif_fallback __read_mostly; static DEFINE_MUTEX(crc_t10dif_mutex); +static struct work_struct crct10dif_rehash_work; -static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data) +static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data) { struct crypto_alg *alg = data; - struct crypto_shash *new, *old; if (val != CRYPTO_MSG_ALG_LOADED || static_key_false(&crct10dif_fallback) || strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING))) return 0; + schedule_work(&crct10dif_rehash_work); + return 0; +} + +static void crc_t10dif_rehash(struct work_struct *work) +{ + struct crypto_shash *new, *old; + mutex_lock(&crc_t10dif_mutex); old = rcu_dereference_protected(crct10dif_tfm, lockdep_is_held(&crc_t10dif_mutex)); if (!old) { mutex_unlock(&crc_t10dif_mutex); - return 0; + return; } new = crypto_alloc_shash("crct10dif", 0, 0); if (IS_ERR(new)) { mutex_unlock(&crc_t10dif_mutex); - return 0; + return; } rcu_assign_pointer(crct10dif_tfm, new); mutex_unlock(&crc_t10dif_mutex); synchronize_rcu(); crypto_free_shash(old); - return 0; } static struct notifier_block crc_t10dif_nb = { - .notifier_call = crc_t10dif_rehash, + .notifier_call = crc_t10dif_notify, }; __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) @@ -86,19 +93,26 @@ EXPORT_SYMBOL(crc_t10dif); static int __init crc_t10dif_mod_init(void) { + struct crypto_shash *tfm; + + INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash); crypto_register_notifier(&crc_t10dif_nb); - crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); - if (IS_ERR(crct10dif_tfm)) { + mutex_lock(&crc_t10dif_mutex); + tfm = crypto_alloc_shash("crct10dif", 0, 0); + if (IS_ERR(tfm)) { static_key_slow_inc(&crct10dif_fallback); - crct10dif_tfm = NULL; + tfm = NULL; } + RCU_INIT_POINTER(crct10dif_tfm, tfm); + mutex_unlock(&crc_t10dif_mutex); return 0; } static void __exit crc_t10dif_mod_fini(void) { crypto_unregister_notifier(&crc_t10dif_nb); - crypto_free_shash(crct10dif_tfm); + cancel_work_sync(&crct10dif_rehash_work); + crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1)); } module_init(crc_t10dif_mod_init); @@ -106,11 +120,27 @@ module_exit(crc_t10dif_mod_fini); static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp) { + struct crypto_shash *tfm; + const char *name; + int len; + if (static_key_false(&crct10dif_fallback)) return sprintf(buffer, "fallback\n"); - return 
sprintf(buffer, "%s\n", - crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm))); + rcu_read_lock(); + tfm = rcu_dereference(crct10dif_tfm); + if (!tfm) { + len = sprintf(buffer, "init\n"); + goto unlock; + } + + name = crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); + len = sprintf(buffer, "%s\n", name); + +unlock: + rcu_read_unlock(); + + return len; } module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644); From 57b1aac1b426b7255afa195298ed691ffea204c6 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Mon, 8 Jun 2020 22:01:11 +0800 Subject: [PATCH 012/157] crypto: hisilicon - update SEC driver module parameter When running stress-ng against the SEC engine on Ubuntu, we found that SEC supports only two threads, each with one TFM, given the default value of the module parameter 'ctx_q_num'. Running more threads makes stress-ng fail, since it cannot obtain more TFMs. In order to fix this, we adjusted the default values of the module parameters to support more TFMs. Signed-off-by: Longfang Liu Signed-off-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index a4cb58b54b25..57de51fa6f51 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -30,9 +30,9 @@ #define SEC_SQE_SIZE 128 #define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) -#define SEC_PF_DEF_Q_NUM 64 +#define SEC_PF_DEF_Q_NUM 256 #define SEC_PF_DEF_Q_BASE 0 -#define SEC_CTX_Q_NUM_DEF 24 +#define SEC_CTX_Q_NUM_DEF 2 #define SEC_CTX_Q_NUM_MAX 32 #define SEC_CTRL_CNT_CLR_CE 0x301120 @@ -191,7 +191,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = { }; static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); -MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)"); +MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)"); static const struct kernel_param_ops vfs_num_ops = { .set = vfs_num_set, From be924e0aaa315a179c25221685e3f1a35a70156e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 9 Jun 2020 23:39:42 -0700 Subject: [PATCH 013/157] crc-t10dif: use fallback in initial state Currently the crc-t10dif module starts out with the fallback disabled and crct10dif_tfm == NULL. crc_t10dif_mod_init() tries to allocate crct10dif_tfm, and if it fails it enables the fallback. This is backwards because it means that any call to crc_t10dif() prior to module_init (which could theoretically happen from built-in code) will crash rather than use the fallback as expected. Also, it means that if the initial tfm allocation fails, then the fallback stays permanently enabled even if a crct10dif implementation is loaded later. Change it to use the more logical solution of starting with the fallback enabled, and disabling the fallback when a tfm gets allocated for the first time. This change also ends up simplifying the code. Also take the opportunity to convert the code to use the new static_key API, which is much less confusing than the old and deprecated one. Cc: Martin K.
Petersen Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- lib/crc-t10dif.c | 37 ++++++++++--------------------------- 1 file changed, 10 insertions(+), 27 deletions(-) diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index c9acf1c12cfc..af3613367ef9 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -17,7 +17,7 @@ #include static struct crypto_shash __rcu *crct10dif_tfm; -static struct static_key crct10dif_fallback __read_mostly; +static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback); static DEFINE_MUTEX(crc_t10dif_mutex); static struct work_struct crct10dif_rehash_work; @@ -26,7 +26,6 @@ static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, voi struct crypto_alg *alg = data; if (val != CRYPTO_MSG_ALG_LOADED || - static_key_false(&crct10dif_fallback) || strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING))) return 0; @@ -41,10 +40,6 @@ static void crc_t10dif_rehash(struct work_struct *work) mutex_lock(&crc_t10dif_mutex); old = rcu_dereference_protected(crct10dif_tfm, lockdep_is_held(&crc_t10dif_mutex)); - if (!old) { - mutex_unlock(&crc_t10dif_mutex); - return; - } new = crypto_alloc_shash("crct10dif", 0, 0); if (IS_ERR(new)) { mutex_unlock(&crc_t10dif_mutex); @@ -53,8 +48,12 @@ static void crc_t10dif_rehash(struct work_struct *work) rcu_assign_pointer(crct10dif_tfm, new); mutex_unlock(&crc_t10dif_mutex); - synchronize_rcu(); - crypto_free_shash(old); + if (old) { + synchronize_rcu(); + crypto_free_shash(old); + } else { + static_branch_disable(&crct10dif_fallback); + } } static struct notifier_block crc_t10dif_nb = { @@ -69,7 +68,7 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) } desc; int err; - if (static_key_false(&crct10dif_fallback)) + if (static_branch_unlikely(&crct10dif_fallback)) return crc_t10dif_generic(crc, buffer, len); rcu_read_lock(); @@ -93,18 +92,9 @@ EXPORT_SYMBOL(crc_t10dif); static int __init crc_t10dif_mod_init(void) { - struct crypto_shash *tfm; - INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash); crypto_register_notifier(&crc_t10dif_nb); - mutex_lock(&crc_t10dif_mutex); - tfm = crypto_alloc_shash("crct10dif", 0, 0); - if (IS_ERR(tfm)) { - static_key_slow_inc(&crct10dif_fallback); - tfm = NULL; - } - RCU_INIT_POINTER(crct10dif_tfm, tfm); - mutex_unlock(&crc_t10dif_mutex); + crc_t10dif_rehash(&crct10dif_rehash_work); return 0; } @@ -124,20 +114,13 @@ static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp const char *name; int len; - if (static_key_false(&crct10dif_fallback)) + if (static_branch_unlikely(&crct10dif_fallback)) return sprintf(buffer, "fallback\n"); rcu_read_lock(); tfm = rcu_dereference(crct10dif_tfm); - if (!tfm) { - len = sprintf(buffer, "init\n"); - goto unlock; - } - name = crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); len = sprintf(buffer, "%s\n", name); - -unlock: rcu_read_unlock(); return len; From 29195232fa2f722449c1c6850277b4ca9fa5f781 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 9 Jun 2020 23:39:43 -0700 Subject: [PATCH 014/157] crc-t10dif: clean up some more things - Correctly compare the algorithm name in crc_t10dif_notify(). - Use proper NOTIFY_* status codes instead of 0. - Consistently use CRC_T10DIF_STRING instead of "crct10dif" directly. - Use a proper type for the shash_desc context. - Use crypto_shash_driver_name() instead of open-coding it. - Make crc_t10dif_transform_show() use snprintf() rather than sprintf(). 
This isn't actually necessary since the buffer has size PAGE_SIZE and CRYPTO_MAX_ALG_NAME < PAGE_SIZE, but it's good practice. - Give the "transform" sysfs file mode 0444 rather than 0644, since it doesn't implement a setter method. - Adjust the module description to not be the same as crct10dif-generic. Cc: Martin K. Petersen Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- lib/crc-t10dif.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index af3613367ef9..1ed2ed487097 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -26,11 +26,11 @@ static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, voi struct crypto_alg *alg = data; if (val != CRYPTO_MSG_ALG_LOADED || - strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING))) - return 0; + strcmp(alg->cra_name, CRC_T10DIF_STRING)) + return NOTIFY_DONE; schedule_work(&crct10dif_rehash_work); - return 0; + return NOTIFY_OK; } static void crc_t10dif_rehash(struct work_struct *work) @@ -40,7 +40,7 @@ static void crc_t10dif_rehash(struct work_struct *work) mutex_lock(&crc_t10dif_mutex); old = rcu_dereference_protected(crct10dif_tfm, lockdep_is_held(&crc_t10dif_mutex)); - new = crypto_alloc_shash("crct10dif", 0, 0); + new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0); if (IS_ERR(new)) { mutex_unlock(&crc_t10dif_mutex); return; @@ -64,7 +64,7 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) { struct { struct shash_desc shash; - char ctx[2]; + __u16 crc; } desc; int err; @@ -73,14 +73,13 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) rcu_read_lock(); desc.shash.tfm = rcu_dereference(crct10dif_tfm); - *(__u16 *)desc.ctx = crc; - + desc.crc = crc; err = crypto_shash_update(&desc.shash, buffer, len); rcu_read_unlock(); BUG_ON(err); - return *(__u16 *)desc.ctx; + return desc.crc; } EXPORT_SYMBOL(crc_t10dif_update); @@ -111,7 +110,6 @@ module_exit(crc_t10dif_mod_fini); static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp) { struct crypto_shash *tfm; - const char *name; int len; if (static_branch_unlikely(&crct10dif_fallback)) @@ -119,15 +117,15 @@ static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp rcu_read_lock(); tfm = rcu_dereference(crct10dif_tfm); - name = crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); - len = sprintf(buffer, "%s\n", name); + len = snprintf(buffer, PAGE_SIZE, "%s\n", + crypto_shash_driver_name(tfm)); rcu_read_unlock(); return len; } -module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644); +module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444); -MODULE_DESCRIPTION("T10 DIF CRC calculation"); +MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)"); MODULE_LICENSE("GPL"); MODULE_SOFTDEP("pre: crct10dif"); From c31b4adee161caa7424febee222d99984008bfa2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 11 Jun 2020 16:39:34 +0100 Subject: [PATCH 015/157] crypto: caam/qi2 - remove redundant assignment to ret The variable ret is being assigned a value that is never read; the error exit path via label 'unmap' returns -ENOMEM anyhow, so assigning ret with -ENOMEM is redundant.
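For illustration only, a minimal sketch of the pattern (a hypothetical mapping helper, not the caam/qi2 code) — when ret already holds -ENOMEM, re-assigning it before the goto is dead code:

#include <linux/dma-mapping.h>

static int demo_map(struct device *dev, void *buf, size_t len,
		    dma_addr_t *dma)
{
	int ret = -ENOMEM;	/* every error exit below returns this */

	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		goto unmap;	/* writing "ret = -ENOMEM;" first is dead code */

	return 0;

unmap:
	/* cleanup of any earlier mappings would go here */
	return ret;
}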
Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 35fbb3a74cb4..1e90412afea2 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -4044,7 +4044,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req) DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { dev_err(ctx->dev, "unable to map S/G table\n"); - ret = -ENOMEM; goto unmap; } edesc->qm_sg_bytes = qm_sg_bytes; @@ -4055,7 +4054,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req) if (dma_mapping_error(ctx->dev, state->ctx_dma)) { dev_err(ctx->dev, "unable to map ctx\n"); state->ctx_dma = 0; - ret = -ENOMEM; goto unmap; } From 624e62ccb20b011cb5b23c53b3f400c3389ce60d Mon Sep 17 00:00:00 2001 From: Wojciech Ziemba Date: Thu, 11 Jun 2020 22:14:47 +0100 Subject: [PATCH 016/157] crypto: qat - update fw init admin msg This patch tidies up the definition of init/admin request and response messages by removing the icp_qat_fw_init_admin_resp_pars structure and embedding it into icp_qat_fw_init_admin_resp. Signed-off-by: Wojciech Ziemba Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_admin.c | 4 +- .../qat/qat_common/icp_qat_fw_init_admin.h | 79 ++++++++++++------- 2 files changed, 54 insertions(+), 29 deletions(-) diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index e363d13dcd78..a51ba0039cff 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -152,7 +152,7 @@ static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) int i; memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); - req.init_admin_cmd_id = cmd; + req.cmd_id = cmd; if (cmd == ICP_QAT_FW_CONSTANTS_CFG) { req.init_cfg_sz = 1024; @@ -161,7 +161,7 @@ static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || - resp.init_resp_hdr.status) + resp.status) return -EFAULT; } return 0; diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h index c867e9a0a540..d4d188cd7ed0 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h @@ -25,48 +25,73 @@ enum icp_qat_fw_init_admin_resp_status { struct icp_qat_fw_init_admin_req { __u16 init_cfg_sz; __u8 resrvd1; - __u8 init_admin_cmd_id; + __u8 cmd_id; __u32 resrvd2; __u64 opaque_data; __u64 init_cfg_ptr; - __u64 resrvd3; -}; -struct icp_qat_fw_init_admin_resp_hdr { - __u8 flags; - __u8 resrvd1; - __u8 status; - __u8 init_admin_cmd_id; -}; - -struct icp_qat_fw_init_admin_resp_pars { union { - __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4]; struct { - __u32 version_patch_num; - __u8 context_id; - __u8 ae_id; - __u16 resrvd1; - __u64 resrvd2; - } s1; - struct { - __u64 req_rec_count; - __u64 resp_sent_count; - } s2; - } u; + __u16 ibuf_size_in_kb; + __u16 resrvd3; + }; + __u32 idle_filter; + }; + + __u32 resrvd4; }; struct icp_qat_fw_init_admin_resp { - struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr; + __u8 flags; + __u8 resrvd1; + __u8 status; + __u8 cmd_id; union { __u32 resrvd2; struct { 
__u16 version_minor_num; __u16 version_major_num; - } s; - } u; + }; + }; __u64 opaque_data; - struct icp_qat_fw_init_admin_resp_pars init_resp_pars; + union { + __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4]; + struct { + __u32 version_patch_num; + __u8 context_id; + __u8 ae_id; + __u16 resrvd4; + __u64 resrvd5; + }; + struct { + __u64 req_rec_count; + __u64 resp_sent_count; + }; + struct { + __u16 compression_algos; + __u16 checksum_algos; + __u32 deflate_capabilities; + __u32 resrvd6; + __u32 lzs_capabilities; + }; + struct { + __u32 cipher_algos; + __u32 hash_algos; + __u16 keygen_algos; + __u16 other; + __u16 public_key_algos; + __u16 prime_algos; + }; + struct { + __u64 timestamp; + __u64 resrvd7; + }; + struct { + __u32 successful_count; + __u32 unsuccessful_count; + __u64 resrvd8; + }; + }; }; #define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 From e4e245ad031f82795a2512cb6d3167c824c6fe81 Mon Sep 17 00:00:00 2001 From: Wojciech Ziemba Date: Thu, 11 Jun 2020 22:14:48 +0100 Subject: [PATCH 017/157] crypto: qat - send admin messages to set of AEs Update the logic that sends admin messages to be able to target a subset of Acceleration Engines (AEs) in the device. In future not all admin messages need to be sent to all the AEs. Signed-off-by: Wojciech Ziemba Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_admin.c | 66 ++++++++++++++++------- 1 file changed, 47 insertions(+), 19 deletions(-) diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index a51ba0039cff..aa610f80296d 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -16,6 +16,7 @@ #define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 #define ADF_ADMINMSG_LEN 32 +#define ADF_CONST_TABLE_SIZE 1024 static const u8 const_tab[1024] __aligned(1024) = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -114,6 +115,7 @@ static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, int offset = ae * ADF_ADMINMSG_LEN * 2; void __iomem *mailbox = admin->mailbox_addr; int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; + struct icp_qat_fw_init_admin_req *request = in; int times, received; mutex_lock(&admin->lock); @@ -138,33 +140,57 @@ static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); else dev_err(&GET_DEV(accel_dev), - "Failed to send admin msg to accelerator\n"); + "Failed to send admin msg %d to accelerator %d\n", + request->cmd_id, ae); mutex_unlock(&admin->lock); return received ? 
0 : -EFAULT; } -static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) +static int adf_send_admin(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req, + struct icp_qat_fw_init_admin_resp *resp, + const unsigned long ae_mask) +{ + u32 ae; + + for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER) + if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) || + resp->status) + return -EFAULT; + + return 0; +} + +static int adf_init_me(struct adf_accel_dev *accel_dev) { - struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; - int i; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + u32 ae_mask = hw_device->ae_mask; - memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); - req.cmd_id = cmd; + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.cmd_id = ICP_QAT_FW_INIT_ME; - if (cmd == ICP_QAT_FW_CONSTANTS_CFG) { - req.init_cfg_sz = 1024; - req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; - } - for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { - memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); - if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || - resp.status) - return -EFAULT; - } - return 0; + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + +static int adf_set_fw_constants(struct adf_accel_dev *accel_dev) +{ + struct icp_qat_fw_init_admin_req req; + struct icp_qat_fw_init_admin_resp resp; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + u32 ae_mask = hw_device->ae_mask; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG; + + req.init_cfg_sz = ADF_CONST_TABLE_SIZE; + req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); } /** @@ -177,11 +203,13 @@ static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) */ int adf_send_admin_init(struct adf_accel_dev *accel_dev) { - int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); + int ret; + ret = adf_init_me(accel_dev); if (ret) return ret; - return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG); + + return adf_set_fw_constants(accel_dev); } EXPORT_SYMBOL_GPL(adf_send_admin_init); From a79d471c6510db6a69fc2c0f474dbe688bea7641 Mon Sep 17 00:00:00 2001 From: Wojciech Ziemba Date: Thu, 11 Jun 2020 22:14:49 +0100 Subject: [PATCH 018/157] crypto: qat - update timeout logic in put admin msg Replace timeout logic in adf_put_admin_msg_sync() with existing macro readl_poll_timeout(). 
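For reference, readl_poll_timeout() from <linux/iopoll.h> re-reads a register until a condition holds or a timeout expires, returning 0 on success and -ETIMEDOUT otherwise. A minimal sketch using the same delay and timeout values as the patch (the surrounding function is hypothetical):

#include <linux/iopoll.h>
#include <linux/time64.h>

static int demo_wait_mailbox(void __iomem *mailbox)
{
	u32 status;

	/* Re-read roughly every 20us; give up after 5 seconds */
	return readl_poll_timeout(mailbox, status, status == 0,
				  20, 5 * USEC_PER_SEC);
}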
Signed-off-by: Wojciech Ziemba Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_admin.c | 34 +++++++++++++---------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index aa610f80296d..1c8ca151a963 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include "adf_accel_devices.h" @@ -17,6 +17,8 @@ #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 #define ADF_ADMINMSG_LEN 32 #define ADF_CONST_TABLE_SIZE 1024 +#define ADF_ADMIN_POLL_DELAY_US 20 +#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC) static const u8 const_tab[1024] __aligned(1024) = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -111,12 +113,13 @@ struct adf_admin_comms { static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, void *in, void *out) { + int ret; + u32 status; struct adf_admin_comms *admin = accel_dev->admin; int offset = ae * ADF_ADMINMSG_LEN * 2; void __iomem *mailbox = admin->mailbox_addr; int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; struct icp_qat_fw_init_admin_req *request = in; - int times, received; mutex_lock(&admin->lock); @@ -127,24 +130,25 @@ static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); ADF_CSR_WR(mailbox, mb_offset, 1); - received = 0; - for (times = 0; times < 50; times++) { - msleep(20); - if (ADF_CSR_RD(mailbox, mb_offset) == 0) { - received = 1; - break; - } - } - if (received) - memcpy(out, admin->virt_addr + offset + - ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); - else + + ret = readl_poll_timeout(mailbox + mb_offset, status, + status == 0, ADF_ADMIN_POLL_DELAY_US, + ADF_ADMIN_POLL_TIMEOUT_US); + if (ret < 0) { + /* Response timeout */ dev_err(&GET_DEV(accel_dev), "Failed to send admin msg %d to accelerator %d\n", request->cmd_id, ae); + } else { + /* Response received from admin message, we can now + * make response data available in "out" parameter. + */ + memcpy(out, admin->virt_addr + offset + + ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); + } mutex_unlock(&admin->lock); - return received ? 0 : -EFAULT; + return ret; } static int adf_send_admin(struct adf_accel_dev *accel_dev, From 864c2d57d691b5308dbb8beca73f8ccb9e183409 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 12 Jun 2020 16:00:23 +1000 Subject: [PATCH 019/157] crypto: caam - Fix argument type in handle_imx6_err005766 The function handle_imx6_err005766 needs to take an __iomem argument as otherwise sparse will generate two warnings. Fixes: 33d69455e402 ("crypto: caam - limit AXI pipeline to a...") Signed-off-by: Herbert Xu Reviewed-by: Iuliana Prodan Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 6e0b0ddbce31..94502f1d4b48 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -469,7 +469,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl) * pipeline to a depth of 1 (from it's default of 4) to preclude this situation * from occurring. 
*/ -static void handle_imx6_err005766(u32 *mcr) +static void handle_imx6_err005766(u32 __iomem *mcr) { if (of_machine_is_compatible("fsl,imx6q") || of_machine_is_compatible("fsl,imx6dl") || From a05b1c150f5bc0f83313a5e14b94725b5eda5f29 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 12 Jun 2020 21:16:22 +1000 Subject: [PATCH 020/157] crypto: octeontx - Fix sparse warnings This patch fixes all the sparse warnings in the octeontx driver. Some of these are just trivial type changes. However, some of the changes are non-trivial on little-endian hosts. Obviously the driver appears to be broken on either LE or BE as it was doing different things. I've taken the BE behaviour as the correct one. Reported-by: kernel test robot Signed-off-by: Herbert Xu --- .../crypto/marvell/octeontx/otx_cptpf_ucode.c | 8 +++---- .../crypto/marvell/octeontx/otx_cptpf_ucode.h | 2 +- .../crypto/marvell/octeontx/otx_cptvf_algs.c | 21 +++++++--------- .../crypto/marvell/octeontx/otx_cptvf_algs.h | 6 +++-- .../marvell/octeontx/otx_cptvf_reqmgr.c | 9 +++---- .../marvell/octeontx/otx_cptvf_reqmgr.h | 24 +++++++++---------- 6 files changed, 32 insertions(+), 38 deletions(-) diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index fec8f3b9b112..cc103b1bc224 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -878,11 +878,11 @@ static int copy_ucode_to_dma_mem(struct device *dev, /* Byte swap 64-bit */ for (i = 0; i < (ucode->size / 8); i++) - ((u64 *)ucode->align_va)[i] = + ((__be64 *)ucode->align_va)[i] = cpu_to_be64(((u64 *)ucode->align_va)[i]); /* Ucode needs 16-bit swap */ for (i = 0; i < (ucode->size / 2); i++) - ((u16 *)ucode->align_va)[i] = + ((__be16 *)ucode->align_va)[i] = cpu_to_be16(((u16 *)ucode->align_va)[i]); return 0; } @@ -1463,8 +1463,8 @@ int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev, struct otx_cpt_eng_grps *eng_grps, int pf_type) { - struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = { 0 }; - struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} }; + struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {}; + struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {}; struct tar_arch_info_t *tar_arch = NULL; char *tar_filename; int i, ret = 0; diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h index 14f02b60d0c2..8620ac87a447 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h @@ -74,7 +74,7 @@ struct otx_cpt_ucode_ver_num { struct otx_cpt_ucode_hdr { struct otx_cpt_ucode_ver_num ver_num; u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ]; - u32 code_length; + __be32 code_length; u32 padding[3]; }; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 1e0a1d70ebd3..01bf4826c03e 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -239,7 +239,6 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, struct otx_cpt_fc_ctx *fctx = &rctx->fctx; int ivsize = crypto_skcipher_ivsize(stfm); u32 start = req->cryptlen - ivsize; - u64 *ctrl_flags = NULL; gfp_t flags; flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
@@ -280,8 +279,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm)); - ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags; - *ctrl_flags = cpu_to_be64(*ctrl_flags); + fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags); /* * Storing Packet Data Information in offset @@ -692,20 +690,17 @@ static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg) static inline void swap_data32(void *buf, u32 len) { - u32 *store = (u32 *) buf; - int i = 0; - - for (i = 0 ; i < len/sizeof(u32); i++, store++) - *store = cpu_to_be32(*store); + cpu_to_be32_array(buf, buf, len / 4); } static inline void swap_data64(void *buf, u32 len) { - u64 *store = (u64 *) buf; + __be64 *dst = buf; + u64 *src = buf; int i = 0; - for (i = 0 ; i < len/sizeof(u64); i++, store++) - *store = cpu_to_be64(*store); + for (i = 0 ; i < len / 8; i++, src++, dst++) + *dst = cpu_to_be64p(src); } static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) @@ -1012,7 +1007,7 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc, /* Unknown cipher type */ return -EINVAL; } - rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.flags); + rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags); req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER; req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ; @@ -1032,7 +1027,7 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc, fctx->enc.enc_ctrl.e.aes_key = ctx->key_type; fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type; fctx->enc.enc_ctrl.e.mac_len = mac_len; - fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.flags); + fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags); /* * Storing Packet Data Information in offset diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h index 67cc0025f5d5..4181b5c5c356 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h @@ -66,7 +66,8 @@ enum otx_cpt_aes_key_len { }; union otx_cpt_encr_ctrl { - u64 flags; + __be64 flags; + u64 cflags; struct { #if defined(__BIG_ENDIAN_BITFIELD) u64 enc_cipher:4; @@ -138,7 +139,8 @@ struct otx_cpt_des3_ctx { }; union otx_cpt_offset_ctrl_word { - u64 flags; + __be64 flags; + u64 cflags; struct { #if defined(__BIG_ENDIAN_BITFIELD) u64 reserved:32; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c index 239195cccf93..cbc3d7869ebe 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c @@ -202,11 +202,10 @@ static inline int setup_sgio_list(struct pci_dev *pdev, info->dlen = dlen; info->in_buffer = (u8 *)info + info_len; - ((u16 *)info->in_buffer)[0] = req->outcnt; - ((u16 *)info->in_buffer)[1] = req->incnt; + ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt); + ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt); ((u16 *)info->in_buffer)[2] = 0; ((u16 *)info->in_buffer)[3] = 0; - *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer); /* Setup gather (input) components */ if (setup_sgio_components(pdev, req->in, req->incnt, @@ -367,8 +366,6 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req, iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2); iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen); - /* 64-bit swap for microcode data reads, not needed for addresses*/ - 
iq_cmd.cmd.u64 = cpu_to_be64(iq_cmd.cmd.u64); iq_cmd.dptr = info->dptr_baddr; iq_cmd.rptr = info->rptr_baddr; iq_cmd.cptr.u64 = 0; @@ -436,7 +433,7 @@ static int cpt_process_ccode(struct pci_dev *pdev, u8 ccode = cpt_status->s.compcode; union otx_cpt_error_code ecode; - ecode.u = be64_to_cpu(*((u64 *) cpt_info->out_buffer)); + ecode.u = be64_to_cpup((__be64 *)cpt_info->out_buffer); switch (ccode) { case CPT_COMP_E_FAULT: dev_err(&pdev->dev, diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h index a4c9ff730b13..d912fe0c532d 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h @@ -92,10 +92,10 @@ union otx_cpt_ctrl_info { union otx_cpt_iq_cmd_word0 { u64 u64; struct { - u16 opcode; - u16 param1; - u16 param2; - u16 dlen; + __be16 opcode; + __be16 param1; + __be16 param2; + __be16 dlen; } s; }; @@ -123,16 +123,16 @@ struct otx_cpt_sglist_component { union { u64 len; struct { - u16 len0; - u16 len1; - u16 len2; - u16 len3; + __be16 len0; + __be16 len1; + __be16 len2; + __be16 len3; } s; } u; - u64 ptr0; - u64 ptr1; - u64 ptr2; - u64 ptr3; + __be64 ptr0; + __be64 ptr1; + __be64 ptr2; + __be64 ptr3; }; struct otx_cpt_pending_entry { From 6ec5e8b5e7148c3ece1850dffd62a493dbfc2d22 Mon Sep 17 00:00:00 2001 From: Zhangfei Gao Date: Mon, 15 Jun 2020 11:38:37 +0800 Subject: [PATCH 021/157] crypto: hisilicon - fix strncpy warning with strscpy Use strscpy to fix the warning warning: 'strncpy' specified bound 64 equals destination size Reported-by: kernel test robot Signed-off-by: Zhangfei Gao Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 13ccb9e29a2e..9762fd1f299d 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2170,8 +2170,12 @@ static int qm_alloc_uacce(struct hisi_qm *qm) .flags = UACCE_DEV_SVA, .ops = &uacce_qm_ops, }; + int ret; - strncpy(interface.name, pdev->driver->name, sizeof(interface.name)); + ret = strscpy(interface.name, pdev->driver->name, + sizeof(interface.name)); + if (ret < 0) + return -ENAMETOOLONG; uacce = uacce_alloc(&pdev->dev, &interface); if (IS_ERR(uacce)) From 3da74a674191d2a5fe9d124d04826d548fb86877 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 15 Jun 2020 21:36:20 +1000 Subject: [PATCH 022/157] crypto: omap-des - Fix sparse/compiler warnings This patch fixes sparse endianness warnings as well as compiler warnings on 64-bit hosts. 
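Both classes of warning come down to type annotations; a hedged sketch with a hypothetical structure (not the omap-des code):

#include <linux/types.h>
#include <linux/printk.h>

struct demo_ctx {
	__le32 key[6];	/* sparse: bytes handed to little-endian hardware */
	size_t total;
};

static void demo_log(struct demo_ctx *ctx)
{
	/* size_t takes %zu (%zd for ssize_t); %d warns on 64-bit builds */
	pr_debug("total: %zu\n", ctx->total);
}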
Signed-off-by: Herbert Xu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-des.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 8eda43319204..c9d38bcfd1c7 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -87,7 +87,7 @@ struct omap_des_ctx { struct omap_des_dev *dd; int keylen; - u32 key[(3 * DES_KEY_SIZE) / sizeof(u32)]; + __le32 key[(3 * DES_KEY_SIZE) / sizeof(u32)]; unsigned long flags; }; @@ -461,7 +461,7 @@ static int omap_des_crypt_dma_start(struct omap_des_dev *dd) crypto_skcipher_reqtfm(dd->req)); int err; - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zd\n", dd->total); if (!dd->pio_only) { err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, @@ -504,7 +504,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err) static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) { - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zd\n", dd->total); omap_des_dma_stop(dd); From 758f4879ba717909cae3da5623130199f561ae29 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 15 Jun 2020 21:37:38 +1000 Subject: [PATCH 023/157] crypto: omap-sham - Fix sparse/compiler warnings This patch fixes sparse endianness warnings as well as compiler warnings on 64-bit hosts. Signed-off-by: Herbert Xu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 82691a057d2a..954d703f2981 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -357,10 +357,10 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) if (big_endian) for (i = 0; i < d; i++) - hash[i] = be32_to_cpu(in[i]); + hash[i] = be32_to_cpup((__be32 *)in + i); else for (i = 0; i < d; i++) - hash[i] = le32_to_cpu(in[i]); + hash[i] = le32_to_cpup((__le32 *)in + i); } static int omap_sham_hw_init(struct omap_sham_dev *dd) @@ -522,7 +522,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length, int mlen; struct sg_mapping_iter mi; - dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", + dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n", ctx->digcnt, length, final); dd->pdata->write_ctrl(dd, length, final, 0); @@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length, struct dma_slave_config cfg; int ret; - dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", + dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n", ctx->digcnt, length, final); if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) { @@ -871,7 +871,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) nbytes += req->nbytes - rctx->offset; dev_dbg(rctx->dd->dev, - "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n", + "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n", __func__, nbytes, bs, rctx->total, rctx->offset, rctx->bufcnt); @@ -932,7 +932,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) return 0; } -struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) +static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) { struct omap_sham_dev *dd; @@ -1023,7 +1023,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) bool final = (ctx->flags & BIT(FLAGS_FINUP)) && !(dd->flags & BIT(FLAGS_HUGE)); - 
dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d", + ctx->total, ctx->digcnt, final); if (ctx->total < get_block_size(ctx) || @@ -1036,7 +1036,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) err = omap_sham_xmit_dma(dd, ctx->total, final); /* wait for dma completion before can take more data */ - dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); + dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt); return err; } @@ -1097,7 +1097,7 @@ static int omap_sham_finish(struct ahash_request *req) err = omap_sham_finish_hmac(req); } - dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); + dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt); return err; } From 9e27c99104707f083dccd3b4d79762859b5a0614 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 17 Jun 2020 09:48:56 -0400 Subject: [PATCH 024/157] crypto: cpt - don't sleep if CRYPTO_TFM_REQ_MAY_SLEEP was not specified There is this call chain: cvm_encrypt -> cvm_enc_dec -> cptvf_do_request -> process_request -> kzalloc where we call a sleeping allocator function even if CRYPTO_TFM_REQ_MAY_SLEEP was not specified. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org # v4.11+ Fixes: c694b233295b ("crypto: cavium - Add the Virtual Function driver for CPT") Signed-off-by: Herbert Xu --- drivers/crypto/cavium/cpt/cptvf_algs.c | 1 + drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 12 ++++++------ drivers/crypto/cavium/cpt/request_manager.h | 2 ++ 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 1be1adffff1d..2e4bf90c5798 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -200,6 +200,7 @@ static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc) int status; memset(req_info, 0, sizeof(struct cpt_request_info)); + req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0; memset(fctx, 0, sizeof(struct fc_context)); create_input_list(req, enc, enc_iv_len); create_output_list(req, enc_iv_len); diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 7a24019356b5..e343249c8d05 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -133,7 +133,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup gather (input) components */ g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component); - info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL); + info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->gather_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -150,7 +150,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup scatter (output) components */ s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component); - info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ?
GFP_KERNEL : GFP_ATOMIC); if (!info->scatter_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -167,7 +167,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Create and initialize DPTR */ info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE; - info->in_buffer = kzalloc(info->dlen, GFP_KERNEL); + info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->in_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, } /* Create and initialize RPTR */ - info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL); + info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->out_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) struct cpt_vq_command vq_cmd; union cpt_inst_s cptinst; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info)) { dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n"); return -ENOMEM; @@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) * Get buffer for union cpt_res_s response * structure and its physical address */ - info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); + info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info->completion_addr)) { dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); ret = -ENOMEM; diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h index 3514b082eca7..1e8dd9ebcc17 100644 --- a/drivers/crypto/cavium/cpt/request_manager.h +++ b/drivers/crypto/cavium/cpt/request_manager.h @@ -62,6 +62,8 @@ struct cpt_request_info { union ctrl_info ctrl; /* User control information */ struct cptvf_request req; /* Request Information (Core specific) */ + bool may_sleep; + struct buf_ptr in[MAX_BUF_CNT]; struct buf_ptr out[MAX_BUF_CNT]; From 5ead051780404b5cb22147170acadd1994dc3236 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 17 Jun 2020 09:49:52 -0400 Subject: [PATCH 025/157] crypto: hisilicon - don't sleep if CRYPTO_TFM_REQ_MAY_SLEEP was not specified There is this call chain: sec_alg_skcipher_encrypt -> sec_alg_skcipher_crypto -> sec_alg_alloc_and_calc_split_sizes -> kcalloc where we call a sleeping allocator function even if CRYPTO_TFM_REQ_MAY_SLEEP was not specified.
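The core of the fix, sketched as a hypothetical helper (the real patch threads a gfp_t through several call sites): derive the allocation mode from the request flags instead of hard-coding GFP_KERNEL:

#include <linux/crypto.h>
#include <linux/slab.h>

static void *demo_alloc(struct crypto_async_request *base, size_t n)
{
	/* GFP_ATOMIC never sleeps, so this is safe from softirq context */
	gfp_t gfp = (base->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		    GFP_KERNEL : GFP_ATOMIC;

	return kzalloc(n, gfp);
}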
Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org # v4.19+ Fixes: 915e4e8413da ("crypto: hisilicon - SEC security accelerator driver") Acked-by: Jonathan Cameron Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec/sec_algs.c | 34 +++++++++++++------------ 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index c27e7160d2df..4ad4ffd90cee 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, dma_addr_t *psec_sgl, struct scatterlist *sgl, int count, - struct sec_dev_info *info) + struct sec_dev_info *info, + gfp_t gfp) { struct sec_hw_sgl *sgl_current = NULL; struct sec_hw_sgl *sgl_next; @@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, sge_index = i % SEC_MAX_SGE_NUM; if (sge_index == 0) { sgl_next = dma_pool_zalloc(info->hw_sgl_pool, - GFP_KERNEL, &sgl_next_dma); + gfp, &sgl_next_dma); if (!sgl_next) { ret = -ENOMEM; goto err_free_hw_sgls; @@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow) } static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes, - int *steps) + int *steps, gfp_t gfp) { size_t *sizes; int i; /* Split into suitable sized blocks */ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT; - sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL); + sizes = kcalloc(*steps, sizeof(*sizes), gfp); if (!sizes) return -ENOMEM; @@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, int steps, struct scatterlist ***splits, int **splits_nents, int sgl_len_in, - struct device *dev) + struct device *dev, gfp_t gfp) { int ret, count; @@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, if (!count) return -EINVAL; - *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL); + *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp); if (!*splits) { ret = -ENOMEM; goto err_unmap_sg; } - *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL); + *splits_nents = kcalloc(steps, sizeof(int), gfp); if (!*splits_nents) { ret = -ENOMEM; goto err_free_splits; @@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, /* output the scatter list before and after this */ ret = sg_split(sgl, count, 0, steps, split_sizes, - *splits, *splits_nents, GFP_KERNEL); + *splits, *splits_nents, gfp); if (ret) { ret = -ENOMEM; goto err_free_splits_nents; @@ -630,13 +631,13 @@ static struct sec_request_el int el_size, bool different_dest, struct scatterlist *sgl_in, int n_ents_in, struct scatterlist *sgl_out, int n_ents_out, - struct sec_dev_info *info) + struct sec_dev_info *info, gfp_t gfp) { struct sec_request_el *el; struct sec_bd_info *req; int ret; - el = kzalloc(sizeof(*el), GFP_KERNEL); + el = kzalloc(sizeof(*el), gfp); if (!el) return ERR_PTR(-ENOMEM); el->el_length = el_size; @@ -668,7 +669,7 @@ static struct sec_request_el el->sgl_in = sgl_in; ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in, - n_ents_in, info); + n_ents_in, info, gfp); if (ret) goto err_free_el; @@ -679,7 +680,7 @@ static struct sec_request_el el->sgl_out = sgl_out; ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out, el->sgl_out, - n_ents_out, info); + n_ents_out, info, gfp); if (ret) goto err_free_hw_sgl_in; @@ -720,6 +721,7 @@ static int 
sec_alg_skcipher_crypto(struct skcipher_request *skreq, int *splits_out_nents = NULL; struct sec_request_el *el, *temp; bool split = skreq->src != skreq->dst; + gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; mutex_init(&sec_req->lock); sec_req->req_base = &skreq->base; @@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_in = sg_nents(skreq->src); ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes, - &steps); + &steps, gfp); if (ret) return ret; sec_req->num_elements = steps; ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in, &splits_in_nents, sec_req->len_in, - info->dev); + info->dev, gfp); if (ret) goto err_free_split_sizes; @@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_out = sg_nents(skreq->dst); ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, &splits_out, &splits_out_nents, - sec_req->len_out, info->dev); + sec_req->len_out, info->dev, gfp); if (ret) goto err_unmap_in_sg; } @@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, splits_in[i], splits_in_nents[i], split ? splits_out[i] : NULL, split ? splits_out_nents[i] : 0, - info); + info, gfp); if (IS_ERR(el)) { ret = PTR_ERR(el); goto err_free_elements; From d037cb4ae20407df89491f9c2d521ac0723aa15d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 18 Jun 2020 17:00:22 +1000 Subject: [PATCH 026/157] crypto: api - Prune inclusions in crypto.h We haven't used string.h since the memcpy calls were removed so this patch removes its inclusion. The file uaccess.h isn't needed at all. However, removing it reveals that we do need to add an inclusion for refcount.h. Signed-off-by: Herbert Xu --- include/linux/crypto.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 763863dbc079..bc5d2d4bfc3d 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -16,9 +16,8 @@ #include #include #include +#include #include -#include -#include #include /* From 7649d0093086982f3b99810a164cd571519092f1 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 18 Jun 2020 11:12:29 +0100 Subject: [PATCH 027/157] crypto: ccp - remove redundant assignment to variable ret The variable ret is being assigned with a value that is never read and it is being updated later with a new value. The assignment is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-ops.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 422193690fd4..d270aa792888 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -1308,7 +1308,6 @@ ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return -EINVAL; } - ret = -EIO; /* Zero out all the fields of the command desc */ memset(&op, 0, sizeof(op)); From 84d840e1dd90e5fc07dedbd16df0dae7b3b3ef1a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 18 Jun 2020 11:16:25 +0100 Subject: [PATCH 028/157] crypto: img-hash - remove redundant initialization of variable err The variable err is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. 
Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 0e25fc3087f3..87226b7c2795 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -330,7 +330,7 @@ static int img_hash_write_via_dma(struct img_hash_dev *hdev) static int img_hash_dma_init(struct img_hash_dev *hdev) { struct dma_slave_config dma_conf; - int err = -EINVAL; + int err; hdev->dma_lch = dma_request_chan(hdev->dev, "tx"); if (IS_ERR(hdev->dma_lch)) { From d095146cae11b89a1e4e531a9ff87731d0f5eadd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Fri, 19 Jun 2020 16:22:53 +0300 Subject: [PATCH 029/157] crypto: caam/qi2 - fix return code in ahash_finup_no_ctx() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ahash_finup_no_ctx() returns -ENOMEM in most error cases, and this is fine for almost all of them. However, the return code provided by dpaa2_caam_enqueue() (e.g. -EIO or -EBUSY) shouldn't be overridden by -ENOMEM. Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 1e90412afea2..45e9ff851e2d 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -4004,7 +4004,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; struct dpaa2_sg_entry *sg_table; - int ret; + int ret = -ENOMEM; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { @@ -4017,7 +4017,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) DMA_TO_DEVICE); if (!mapped_nents) { dev_err(ctx->dev, "unable to DMA map source\n"); - return -ENOMEM; + return ret; } } else { mapped_nents = 0; @@ -4027,7 +4027,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) edesc = qi_cache_zalloc(GFP_DMA | flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); - return -ENOMEM; + return ret; } edesc->src_nents = src_nents; @@ -4044,6 +4044,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { dev_err(ctx->dev, "unable to map S/G table\n"); + ret = -ENOMEM; goto unmap; } edesc->qm_sg_bytes = qm_sg_bytes; @@ -4054,6 +4055,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) if (dma_mapping_error(ctx->dev, state->ctx_dma)) { dev_err(ctx->dev, "unable to map ctx\n"); state->ctx_dma = 0; + ret = -ENOMEM; goto unmap; } @@ -4080,7 +4082,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) unmap: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); qi_cache_free(edesc); - return -ENOMEM; + return ret; } static int ahash_update_first(struct ahash_request *req) From 9bc6165d608d676f05d8bf156a2c9923ee38d05b Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 21 Jun 2020 14:19:57 +0300 Subject: [PATCH 030/157] crypto: ccree - fix resource leak on error path Fix a small resource leak on the error path of cipher processing. 
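The usual cure, sketched under assumptions (generic names, not the ccree code): acquire resources in order and unwind in reverse, so each error label releases exactly what was set up before the failing step:

#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

struct demo_ctx {
	struct crypto_shash *shash;
	u8 *key;
};

static int demo_init(struct demo_ctx *ctx, unsigned int keysize)
{
	ctx->shash = crypto_alloc_shash("sha256", 0, 0);	/* step 1 */
	if (IS_ERR(ctx->shash))
		return PTR_ERR(ctx->shash);

	ctx->key = kmalloc(keysize, GFP_KERNEL);		/* step 2 */
	if (!ctx->key)
		goto free_shash;	/* undo step 1 only */

	return 0;

free_shash:
	crypto_free_shash(ctx->shash);
	return -ENOMEM;
}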
Signed-off-by: Gilad Ben-Yossef Fixes: 63ee04c8b491e ("crypto: ccree - add skcipher support") Cc: Markus Elfring Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_cipher.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 872ea3ff1c6b..f144fe04748b 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -159,7 +159,6 @@ static int cc_cipher_init(struct crypto_tfm *tfm) skcipher_alg.base); struct device *dev = drvdata_to_dev(cc_alg->drvdata); unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; - int rc = 0; dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p, crypto_tfm_alg_name(tfm)); @@ -171,10 +170,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm) ctx_p->flow_mode = cc_alg->flow_mode; ctx_p->drvdata = cc_alg->drvdata; + if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { + /* Alloc hash tfm for essiv */ + ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); + if (IS_ERR(ctx_p->shash_tfm)) { + dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); + return PTR_ERR(ctx_p->shash_tfm); + } + } + /* Allocate key buffer, cache line aligned */ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL); if (!ctx_p->user.key) - return -ENOMEM; + goto free_shash; dev_dbg(dev, "Allocated key buffer in context. key=@%p\n", ctx_p->user.key); @@ -186,21 +194,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm) if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) { dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n", max_key_buf_size, ctx_p->user.key); - return -ENOMEM; + goto free_key; } dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n", max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr); - if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { - /* Alloc hash tfm for essiv */ - ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); - if (IS_ERR(ctx_p->shash_tfm)) { - dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); - return PTR_ERR(ctx_p->shash_tfm); - } - } + return 0; - return rc; +free_key: + kfree(ctx_p->user.key); +free_shash: + crypto_free_shash(ctx_p->shash_tfm); + + return -ENOMEM; } static void cc_cipher_exit(struct crypto_tfm *tfm) From 520f325a597b0277dc617691267d47420334cdf4 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 21 Jun 2020 14:19:58 +0300 Subject: [PATCH 031/157] crypto: ccree - adapt ccree essiv support to kcapi The ESSIV support in ccree was added before the generic kernel support and uses a slightly different API. Bring the ccree ESSIV interface into compliance with the kernel crypto API one. Since CryptoCell only supports a 256-bit AES key for ESSIV, also use a fallback if a smaller key size is requested.
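A sketch of how such a fallback transform is requested (illustrative only; the actual allocation appears in the diff below):

#include <crypto/skcipher.h>

static struct crypto_skcipher *demo_alloc_fallback(const char *name)
{
	/* In the mask, CRYPTO_ALG_NEED_FALLBACK excludes implementations
	 * that themselves require a fallback, and CRYPTO_ALG_ASYNC selects
	 * a synchronous one that can be invoked inline.
	 */
	return crypto_alloc_skcipher(name, 0,
				     CRYPTO_ALG_NEED_FALLBACK |
				     CRYPTO_ALG_ASYNC);
}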
Signed-off-by: Gilad Ben-Yossef Cc: Ard Biesheuvel Cc: Libo Wang Cc: Markus Elfring Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_cipher.c | 124 +++++++++++++++++++++++-------- 1 file changed, 93 insertions(+), 31 deletions(-) diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index f144fe04748b..68a7cb993960 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -56,6 +56,8 @@ struct cc_cipher_ctx { struct cc_cpp_key_info cpp; }; struct crypto_shash *shash_tfm; + struct crypto_skcipher *fallback_tfm; + bool fallback_on; }; static void cc_cipher_complete(struct device *dev, void *cc_req, int err); @@ -75,7 +77,6 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size) case CC_AES_128_BIT_KEY_SIZE: case CC_AES_192_BIT_KEY_SIZE: if (ctx_p->cipher_mode != DRV_CIPHER_XTS && - ctx_p->cipher_mode != DRV_CIPHER_ESSIV && ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER) return 0; break; @@ -159,30 +160,49 @@ static int cc_cipher_init(struct crypto_tfm *tfm) skcipher_alg.base); struct device *dev = drvdata_to_dev(cc_alg->drvdata); unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; + unsigned int fallback_req_size = 0; dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p, crypto_tfm_alg_name(tfm)); - crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), - sizeof(struct cipher_req_ctx)); - ctx_p->cipher_mode = cc_alg->cipher_mode; ctx_p->flow_mode = cc_alg->flow_mode; ctx_p->drvdata = cc_alg->drvdata; if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { + const char *name = crypto_tfm_alg_name(tfm); + /* Alloc hash tfm for essiv */ - ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); + ctx_p->shash_tfm = crypto_alloc_shash("sha256", 0, 0); if (IS_ERR(ctx_p->shash_tfm)) { dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); return PTR_ERR(ctx_p->shash_tfm); } + max_key_buf_size <<= 1; + + /* Alloc fallabck tfm or essiv when key size != 256 bit */ + ctx_p->fallback_tfm = + crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); + + if (IS_ERR(ctx_p->fallback_tfm)) { + /* Note we're still allowing registration with no fallback since it's + * better to have most modes supported than none at all. + */ + dev_warn(dev, "Error allocating fallback algo %s. Some modes may be available.\n", + name); + ctx_p->fallback_tfm = NULL; + } else { + fallback_req_size = crypto_skcipher_reqsize(ctx_p->fallback_tfm); + } } + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct cipher_req_ctx) + fallback_req_size); + /* Allocate key buffer, cache line aligned */ - ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL); + ctx_p->user.key = kzalloc(max_key_buf_size, GFP_KERNEL); if (!ctx_p->user.key) - goto free_shash; + goto free_fallback; dev_dbg(dev, "Allocated key buffer in context. 
key=@%p\n", ctx_p->user.key); @@ -203,7 +223,8 @@ static int cc_cipher_init(struct crypto_tfm *tfm) free_key: kfree(ctx_p->user.key); -free_shash: +free_fallback: + crypto_free_skcipher(ctx_p->fallback_tfm); crypto_free_shash(ctx_p->shash_tfm); return -ENOMEM; @@ -226,6 +247,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm) /* Free hash tfm for essiv */ crypto_free_shash(ctx_p->shash_tfm); ctx_p->shash_tfm = NULL; + crypto_free_skcipher(ctx_p->fallback_tfm); + ctx_p->fallback_tfm = NULL; } /* Unmap key buffer */ @@ -309,6 +332,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key, } ctx_p->keylen = keylen; + ctx_p->fallback_on = false; switch (cc_slot_to_key_type(hki.hw_key1)) { case CC_HW_PROTECTED_KEY: @@ -394,10 +418,33 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, /* STAT_PHASE_0: Init and sanity checks */ if (validate_keys_sizes(ctx_p, keylen)) { - dev_dbg(dev, "Unsupported key size %d.\n", keylen); + dev_dbg(dev, "Invalid key size %d.\n", keylen); return -EINVAL; } + if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { + + /* We only support 256 bit ESSIV-CBC-AES keys */ + if (keylen != AES_KEYSIZE_256) { + unsigned int flags = crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_MASK; + + if (likely(ctx_p->fallback_tfm)) { + ctx_p->fallback_on = true; + crypto_skcipher_clear_flags(ctx_p->fallback_tfm, + CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(ctx_p->fallback_tfm, flags); + return crypto_skcipher_setkey(ctx_p->fallback_tfm, key, keylen); + } + + dev_dbg(dev, "Unsupported key size %d and no fallback.\n", keylen); + return -EINVAL; + } + + /* Internal ESSIV key buffer is double sized */ + max_key_buf_size <<= 1; + } + + ctx_p->fallback_on = false; ctx_p->key_type = CC_UNPROTECTED_KEY; /* @@ -425,21 +472,20 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, max_key_buf_size, DMA_TO_DEVICE); memcpy(ctx_p->user.key, key, keylen); - if (keylen == 24) - memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24); if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { /* sha256 for key2 - use sw implementation */ - int key_len = keylen >> 1; int err; err = crypto_shash_tfm_digest(ctx_p->shash_tfm, - ctx_p->user.key, key_len, - ctx_p->user.key + key_len); + ctx_p->user.key, keylen, + ctx_p->user.key + keylen); if (err) { dev_err(dev, "Failed to hash ESSIV key.\n"); return err; } + + keylen <<= 1; } dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr, max_key_buf_size, DMA_TO_DEVICE); @@ -577,9 +623,10 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, int flow_mode = ctx_p->flow_mode; int direction = req_ctx->gen_ctx.op_type; dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr; - unsigned int key_len = ctx_p->keylen; + unsigned int key_len = (ctx_p->keylen / 2); dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; unsigned int du_size = nbytes; + unsigned int key_offset = key_len; struct cc_crypto_alg *cc_alg = container_of(tfm->__crt_alg, struct cc_crypto_alg, @@ -599,6 +646,10 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, case DRV_CIPHER_XTS: case DRV_CIPHER_ESSIV: case DRV_CIPHER_BITLOCKER: + + if (cipher_mode == DRV_CIPHER_ESSIV) + key_len = SHA256_DIGEST_SIZE; + /* load XEX key */ hw_desc_init(&desc[*seq_size]); set_cipher_mode(&desc[*seq_size], cipher_mode); @@ -608,12 +659,12 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, ctx_p->hw.key2_slot); } else { set_din_type(&desc[*seq_size], DMA_DLLI, - (key_dma_addr + (key_len / 2)), - (key_len / 2), NS_BIT); + 
(key_dma_addr + key_offset), + key_len, NS_BIT); } set_xex_data_unit_size(&desc[*seq_size], du_size); set_flow_mode(&desc[*seq_size], S_DIN_to_AES2); - set_key_size_aes(&desc[*seq_size], (key_len / 2)); + set_key_size_aes(&desc[*seq_size], key_len); set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY); (*seq_size)++; @@ -622,7 +673,7 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1); set_cipher_mode(&desc[*seq_size], cipher_mode); set_cipher_config0(&desc[*seq_size], direction); - set_key_size_aes(&desc[*seq_size], (key_len / 2)); + set_key_size_aes(&desc[*seq_size], key_len); set_flow_mode(&desc[*seq_size], flow_mode); set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT); @@ -873,6 +924,17 @@ static int cc_cipher_process(struct skcipher_request *req, goto exit_process; } + if (ctx_p->fallback_on) { + struct skcipher_request *subreq = skcipher_request_ctx(req); + + *subreq = *req; + skcipher_request_set_tfm(subreq, ctx_p->fallback_tfm); + if (direction == DRV_CRYPTO_DIRECTION_ENCRYPT) + return crypto_skcipher_encrypt(subreq); + else + return crypto_skcipher_decrypt(subreq); + } + /* The IV we are handed may be allocted from the stack so * we must copy it to a DMAable buffer before use. */ @@ -1016,7 +1078,7 @@ static const struct cc_alg_template skcipher_algs[] = { .sec_func = true, }, { - .name = "essiv(paes)", + .name = "essiv(cbc(paes),sha256)", .driver_name = "essiv-paes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { @@ -1034,7 +1096,7 @@ static const struct cc_alg_template skcipher_algs[] = { .sec_func = true, }, { - .name = "essiv512(paes)", + .name = "essiv512(cbc(paes),sha256)", .driver_name = "essiv-paes-du512-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { @@ -1053,7 +1115,7 @@ static const struct cc_alg_template skcipher_algs[] = { .sec_func = true, }, { - .name = "essiv4096(paes)", + .name = "essiv4096(cbc(paes),sha256)", .driver_name = "essiv-paes-du4096-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { @@ -1275,15 +1337,15 @@ static const struct cc_alg_template skcipher_algs[] = { .std_body = CC_STD_NIST, }, { - .name = "essiv(aes)", + .name = "essiv(cbc(aes),sha256)", .driver_name = "essiv-aes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, - .min_keysize = AES_MIN_KEY_SIZE * 2, - .max_keysize = AES_MAX_KEY_SIZE * 2, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_ESSIV, @@ -1292,15 +1354,15 @@ static const struct cc_alg_template skcipher_algs[] = { .std_body = CC_STD_NIST, }, { - .name = "essiv512(aes)", + .name = "essiv512(cbc(aes),sha256)", .driver_name = "essiv-aes-du512-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, - .min_keysize = AES_MIN_KEY_SIZE * 2, - .max_keysize = AES_MAX_KEY_SIZE * 2, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_ESSIV, @@ -1310,15 +1372,15 @@ static const struct cc_alg_template skcipher_algs[] = { .std_body = CC_STD_NIST, }, { - .name = "essiv4096(aes)", + .name = "essiv4096(cbc(aes),sha256)", .driver_name = "essiv-aes-du4096-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = 
cc_cipher_decrypt, - .min_keysize = AES_MIN_KEY_SIZE * 2, - .max_keysize = AES_MAX_KEY_SIZE * 2, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_ESSIV, From f94907085d5f73e87578130deeef4c744ca60ab1 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 21 Jun 2020 14:19:59 +0300 Subject: [PATCH 032/157] crypto: ccree - remove unused field Remove yet another unused field left over from times gone by. Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_cipher.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 68a7cb993960..076669dc1035 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -45,7 +45,6 @@ enum cc_key_type { struct cc_cipher_ctx { struct cc_drvdata *drvdata; int keylen; - int key_round_number; int cipher_mode; int flow_mode; unsigned int flags; From 5c3a8a661e1bd9d7063485ac2d2ccb512c447129 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Mon, 22 Jun 2020 10:40:08 +0800 Subject: [PATCH 033/157] crypto: sun8i-ce - Fix runtime PM imbalance in sun8i_ce_cipher_init pm_runtime_get_sync() increments the runtime PM usage counter even when the call returns an error code. Thus a corresponding decrement is needed on the error handling path to keep the counter balanced. Fix this by adding the missing pm_runtime_put_noidle() call. Signed-off-by: Dinghao Liu Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index a6abb701bfc6..3665a0a2038f 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -358,6 +358,7 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) return 0; error_pm: + pm_runtime_put_noidle(op->ce->dev); crypto_free_sync_skcipher(op->fallback_tfm); return err; } From 8ac1b9ccbfbaeacc0c33433e3e7825593bda366e Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Mon, 22 Jun 2020 11:45:04 +0530 Subject: [PATCH 034/157] crypto: qce - support zero length test vectors The crypto test module passes zero-length vectors as test input to SHA-1 and SHA-256. To provide correct output for these vectors, hash zero support has been added, as in other crypto drivers.
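For illustration, a hedged sketch of the zero-length short-circuit (simplified from the qce_ahash_final()/qce_ahash_digest() changes in the diff below; the template lookup is omitted and the helper name is hypothetical):

#include <crypto/sha.h>
#include <linux/string.h>

/* Answer a zero-length request from the precomputed zero-message
 * digests exported by the newly selected CRYPTO_SHA1/CRYPTO_SHA256
 * modules, instead of issuing a hardware request.
 */
static void fill_zero_msg_digest_sketch(u8 *result, bool is_sha1)
{
	if (is_sha1)
		memcpy(result, sha1_zero_message_hash, SHA1_DIGEST_SIZE);
	else
		memcpy(result, sha256_zero_message_hash, SHA256_DIGEST_SIZE);
}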
Signed-off-by: Sivaprakash Murugesan Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 ++ drivers/crypto/qce/common.h | 2 ++ drivers/crypto/qce/sha.c | 18 +++++++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 802b9ada4e9e..7bc58bf99703 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -624,6 +624,8 @@ config CRYPTO_DEV_QCE_SKCIPHER config CRYPTO_DEV_QCE_SHA bool depends on CRYPTO_DEV_QCE + select CRYPTO_SHA1 + select CRYPTO_SHA256 choice prompt "Algorithms enabled for QCE acceleration" diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h index 9f989cba0f1b..85ba16418a04 100644 --- a/drivers/crypto/qce/common.h +++ b/drivers/crypto/qce/common.h @@ -87,6 +87,8 @@ struct qce_alg_template { struct ahash_alg ahash; } alg; struct qce_device *qce; + const u8 *hash_zero; + const u32 digest_size; }; void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len); diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 1ab62e7d5f3c..ed82520203f9 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -305,8 +305,12 @@ static int qce_ahash_final(struct ahash_request *req) struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; - if (!rctx->buflen) + if (!rctx->buflen) { + if (tmpl->hash_zero) + memcpy(req->result, tmpl->hash_zero, + tmpl->alg.ahash.halg.digestsize); return 0; + } rctx->last_blk = true; @@ -338,6 +342,13 @@ static int qce_ahash_digest(struct ahash_request *req) rctx->first_blk = true; rctx->last_blk = true; + if (!rctx->nbytes_orig) { + if (tmpl->hash_zero) + memcpy(req->result, tmpl->hash_zero, + tmpl->alg.ahash.halg.digestsize); + return 0; + } + return qce->async_req_enqueue(tmpl->qce, &req->base); } @@ -490,6 +501,11 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, alg->halg.digestsize = def->digestsize; alg->halg.statesize = def->statesize; + if (IS_SHA1(def->flags)) + tmpl->hash_zero = sha1_zero_message_hash; + else if (IS_SHA256(def->flags)) + tmpl->hash_zero = sha256_zero_message_hash; + base = &alg->halg.base; base->cra_blocksize = def->blocksize; base->cra_priority = 300; From a668ee56ff028139a0926e4d2248c9a573858ba0 Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Mon, 22 Jun 2020 11:45:05 +0530 Subject: [PATCH 035/157] crypto: qce - re-initialize context on import The crypto testmgr deliberately corrupts the request context while passing vectors to import. This is to make sure that drivers do not rely on the request context but take all the necessary input from the io vec passed to them. qce casts the request context from the request parameter; since it is corrupted, the subsequent hash request fails and qce hangs. To avoid this, re-initialize the request context on import. The qce import API already takes care of taking the input vectors from the passed io vec.
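As a hedged, generic illustration of the pattern (demo types, not the driver's actual structures): wipe the context before consuming the exported state, so a poisoned context cannot influence the result.

#include <linux/string.h>
#include <linux/types.h>

struct demo_hash_state {	/* hypothetical request context */
	unsigned long flags;
	u64 count;
	u8 buf[64];
};

/* Demo import: reset first (the equivalent of calling ->init()), then
 * rebuild purely from the exported state 'in'.
 */
static int demo_import_sketch(struct demo_hash_state *rctx,
			      const struct demo_hash_state *in)
{
	memset(rctx, 0, sizeof(*rctx));	/* never trust the old contents */
	rctx->flags = in->flags;	/* state comes only from 'in' */
	rctx->count = in->count;
	memcpy(rctx->buf, in->buf, sizeof(rctx->buf));

	return 0;
}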
Signed-off-by: Sivaprakash Murugesan Signed-off-by: Herbert Xu --- drivers/crypto/qce/sha.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index ed82520203f9..9e54a667d72f 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -203,10 +203,18 @@ static int qce_import_common(struct ahash_request *req, u64 in_count, static int qce_ahash_import(struct ahash_request *req, const void *in) { - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); - unsigned long flags = rctx->flags; - bool hmac = IS_SHA_HMAC(flags); - int ret = -EINVAL; + struct qce_sha_reqctx *rctx; + unsigned long flags; + bool hmac; + int ret; + + ret = qce_ahash_init(req); + if (ret) + return ret; + + rctx = ahash_request_ctx(req); + flags = rctx->flags; + hmac = IS_SHA_HMAC(flags); if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { const struct sha1_state *state = in; From df12ef60c87b8fffc4d066dc6e553a77feaa6caf Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Mon, 22 Jun 2020 11:45:06 +0530 Subject: [PATCH 036/157] crypto: qce/sha - Do not modify scatterlist passed along with request The crypto test driver's test_ahash_speed calls the crypto_ahash_update and crypto_ahash_final APIs repeatedly for all the available test vector buffer lengths. If we mark the end of the scatterlist based on the current vector size, then subsequent vectors might fail when the later buffer lengths are larger. To avoid this, do not mark the end of the scatterlist in the qce update API; the qce_ahash_async_req_handle API already takes care of copying the right amount of data from the request scatterlist. Signed-off-by: Sivaprakash Murugesan Signed-off-by: Herbert Xu --- drivers/crypto/qce/sha.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 9e54a667d72f..c230843e2ffb 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -292,8 +292,6 @@ static int qce_ahash_update(struct ahash_request *req) if (!sg_last) return -EINVAL; - sg_mark_end(sg_last); - if (rctx->buflen) { sg_init_table(rctx->sg, 2); sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); From 8a302808c60d441d9884cb00ea7f2b534f2e3ca5 Mon Sep 17 00:00:00 2001 From: John Allen Date: Mon, 22 Jun 2020 15:24:02 -0500 Subject: [PATCH 037/157] crypto: ccp - Fix use of merged scatterlists Running the crypto manager self tests with CONFIG_CRYPTO_MANAGER_EXTRA_TESTS may result in several types of errors when using the ccp-crypto driver: alg: skcipher: cbc-des3-ccp encryption failed on test vector 0; expected_error=0, actual_error=-5 ... alg: skcipher: ctr-aes-ccp decryption overran dst buffer on test vector 0 ... alg: ahash: sha224-ccp test failed (wrong result) on test vector ... These errors are the result of improper processing of scatterlists mapped for DMA. Given a scatterlist in which entries are merged as part of mapping the scatterlist for DMA, the DMA length of a merged entry will reflect the combined length of the entries that were merged. The subsequent scatterlist entry will contain DMA information for the scatterlist entry after the last merged entry, but the non-DMA information will be that of the first merged entry. The ccp driver does not take this scatterlist merging into account. To address this, add a second scatterlist pointer to track the current position in the DMA mapped representation of the scatterlist.
Both the DMA representation and the original representation of the scatterlist must be tracked: while most of the driver can use just the DMA representation, scatterlist_map_and_copy() must use the original representation and expects the scatterlist pointer to be accurate to the original representation. In order to properly walk the original scatterlist, it must be walked until the combined length of the entries seen equals the DMA length of the current entry being processed in the DMA mapped representation. Fixes: 63b945091a070 ("crypto: ccp - CCP device driver and interface support") Signed-off-by: John Allen Cc: stable@vger.kernel.org Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev.h | 1 + drivers/crypto/ccp/ccp-ops.c | 37 +++++++++++++++++++++++++----------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 3f68262d9ab4..87a34d91fdf7 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -469,6 +469,7 @@ struct ccp_sg_workarea { unsigned int sg_used; struct scatterlist *dma_sg; + struct scatterlist *dma_sg_head; struct device *dma_dev; unsigned int dma_count; enum dma_data_direction dma_dir; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index d270aa792888..a06d20263efa 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp) static void ccp_sg_free(struct ccp_sg_workarea *wa) { if (wa->dma_count) - dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); + dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); wa->dma_count = 0; } @@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, return 0; wa->dma_sg = sg; + wa->dma_sg_head = sg; wa->dma_dev = dev; wa->dma_dir = dma_dir; wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); @@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { unsigned int nbytes = min_t(u64, len, wa->bytes_left); + unsigned int sg_combined_len = 0; if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; - if (wa->sg_used == wa->sg->length) { - wa->sg = sg_next(wa->sg); + if (wa->sg_used == sg_dma_len(wa->dma_sg)) { + /* Advance to the next DMA scatterlist entry */ + wa->dma_sg = sg_next(wa->dma_sg); + + /* In the case that the DMA mapped scatterlist has entries + * that have been merged, the non-DMA mapped scatterlist + * must be advanced multiple times for each merged entry. + * This ensures that the current non-DMA mapped entry + * corresponds to the current DMA mapped entry. + */ + do { + sg_combined_len += wa->sg->length; + wa->sg = sg_next(wa->sg); + } while (wa->sg_used > sg_combined_len); + wa->sg_used = 0; } } @@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) /* Update the structures and generate the count */ buf_count = 0; while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { - nbytes = min(sg_wa->sg->length - sg_wa->sg_used, + nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, dm_wa->length - buf_count); nbytes = min_t(u64, sg_wa->bytes_left, nbytes); @@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, * and destination.
The resulting len values will always be <= UINT_MAX * because the dma length is an unsigned int. */ - sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; + sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used; sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); if (dst) { - sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; + sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used; sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); op_len = min(sg_src_len, sg_dst_len); } else { @@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough data in the sg element, but we need to * adjust for any previously copied data */ - op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); + op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg); op->src.u.dma.offset = src->sg_wa.sg_used; op->src.u.dma.length = op_len & ~(block_size - 1); @@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough room in the sg element, but we need to * adjust for any previously used area */ - op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); + op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg); op->dst.u.dma.offset = dst->sg_wa.sg_used; op->dst.u.dma.length = op->src.u.dma.length; } @@ -2027,7 +2042,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) dst.sg_wa.sg_used = 0; for (i = 1; i <= src.sg_wa.dma_count; i++) { if (!dst.sg_wa.sg || - (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { + (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) { ret = -EINVAL; goto e_dst; } @@ -2053,8 +2068,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_dst; } - dst.sg_wa.sg_used += src.sg_wa.sg->length; - if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { + dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg); + if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) { dst.sg_wa.sg = sg_next(dst.sg_wa.sg); dst.sg_wa.sg_used = 0; } From 44069737ac9625a0f02f0f7f5ab96aae4cd819bc Mon Sep 17 00:00:00 2001 From: Jian Cai Date: Mon, 22 Jun 2020 16:24:33 -0700 Subject: [PATCH 038/157] crypto: aesni - add compatibility with IAS Clang's integrated assembler complains "invalid reassignment of non-absolute variable 'var_ddq_add'" while assembling arch/x86/crypto/aes_ctrby8_avx-x86_64.S. This is because var_ddq_add was reassigned with non-absolute values several times, which IAS does not support. We can avoid the reassignment by replacing each use of var_ddq_add with its definition, which gives compatibility with IAS.
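Rendered in C purely for illustration (hypothetical names; the real change below is in the .S file): the eight 128-bit add constants sit back to back in memory, so the i-th one is a fixed offset from the first and no per-iteration re-binding of a symbolic alias is needed.

/* ddq_add_1..ddq_add_8 are contiguous 16-byte constants; direct
 * indexing replaces the 'var_ddq_add = ddq_add_N' reassignment that
 * IAS rejects.
 */
static const unsigned char ddq_add[8][16];

static const unsigned char *ddq_add_n(unsigned int i)	/* i in 1..8 */
{
	return &ddq_add[0][0] + 16 * (i - 1);	/* same as ddq_add[i - 1] */
}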
Link: https://github.com/ClangBuiltLinux/linux/issues/1008 Reported-by: Sedat Dilek Reported-by: Fangrui Song Tested-by: Sedat Dilek # build+boot Linux v5.7.5; clang v11.0.0-git Signed-off-by: Jian Cai Signed-off-by: Herbert Xu --- arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index ec437db1fa54..494a3bda8487 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -127,10 +127,6 @@ ddq_add_8: /* generate a unique variable for ddq_add_x */ -.macro setddq n - var_ddq_add = ddq_add_\n -.endm - /* generate a unique variable for xmm register */ .macro setxdata n var_xdata = %xmm\n @@ -140,9 +136,7 @@ ddq_add_8: .macro club name, id .altmacro - .if \name == DDQ_DATA - setddq %\id - .elseif \name == XDATA + .if \name == XDATA setxdata %\id .endif .noaltmacro @@ -165,9 +159,8 @@ ddq_add_8: .set i, 1 .rept (by - 1) - club DDQ_DATA, i club XDATA, i - vpaddq var_ddq_add(%rip), xcounter, var_xdata + vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata vptest ddq_low_msk(%rip), var_xdata jnz 1f vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata @@ -180,8 +173,7 @@ ddq_add_8: vmovdqa 1*16(p_keys), xkeyA vpxor xkey0, xdata0, xdata0 - club DDQ_DATA, by - vpaddq var_ddq_add(%rip), xcounter, xcounter + vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter vptest ddq_low_msk(%rip), xcounter jnz 1f vpaddq ddq_high_add_1(%rip), xcounter, xcounter From f532ed2a9b9eb49a715b9c8eb7056cca296b2392 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Thu, 25 Jun 2020 13:07:12 -0700 Subject: [PATCH 039/157] crypto: hisilicon/qm - Change type of pasid to u32 PASID is defined as "int" although it's a 20-bit value and shouldn't be a negative int. To be consistent with the PASID type in iommu, define PASID as "u32". Suggested-by: Thomas Gleixner Signed-off-by: Fenghua Yu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 9762fd1f299d..6527c53b073f 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1732,7 +1732,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp) } EXPORT_SYMBOL_GPL(hisi_qm_release_qp); -static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) +static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; @@ -1804,7 +1804,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; int qp_id = qp->qp_id; - int pasid = arg; + u32 pasid = arg; int ret; if (!qm_qp_avail_state(qm, qp, QP_START)) From c41494311920c13d359ead4fb10b420d65238689 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 26 Jun 2020 16:06:06 +1000 Subject: [PATCH 040/157] crypto: cpt - Fix sparse warnings This patch fixes all the sparse warnings in the octeontx driver. Some of these are just trivial type changes. However, some of the changes are non-trivial on little-endian hosts. Obviously the driver appears to be broken on either LE or BE as it was doing different things. I've taken the BE behaviour as the correct one.
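The core of the pattern, as a hedged sketch (field layout follows the diff below; the helper name is hypothetical): declare microcode-visible 16-bit fields as __be16 and convert each value once at assignment, rather than storing native-endian values and byte-swapping the aggregate afterwards.

#include <linux/types.h>
#include <asm/byteorder.h>

/* sparse can now verify that every store into the shared buffer
 * carries an explicit cpu_to_be16() conversion.
 */
static void fill_sg_header_sketch(void *in_buffer, u16 outcnt, u16 incnt)
{
	__be16 *hdr = in_buffer;

	hdr[0] = cpu_to_be16(outcnt);
	hdr[1] = cpu_to_be16(incnt);
	hdr[2] = 0;
	hdr[3] = 0;
}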
Signed-off-by: Herbert Xu --- drivers/crypto/cavium/cpt/cptvf_algs.c | 9 ++++---- drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 12 ++++------ drivers/crypto/cavium/cpt/request_manager.h | 24 ++++++++++---------- 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 2e4bf90c5798..0f0991f9f294 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -99,10 +99,10 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm); struct cvm_req_ctx *rctx = skcipher_request_ctx(req); struct fc_context *fctx = &rctx->fctx; - u64 *offset_control = &rctx->control_word; u32 enc_iv_len = crypto_skcipher_ivsize(tfm); struct cpt_request_info *req_info = &rctx->cpt_req; - u64 *ctrl_flags = NULL; + __be64 *ctrl_flags = NULL; + __be64 *offset_control; req_info->ctrl.s.grp = 0; req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER; @@ -126,9 +126,10 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2); else memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len); - ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags; - *ctrl_flags = cpu_to_be64(*ctrl_flags); + ctrl_flags = (__be64 *)&fctx->enc.enc_ctrl.flags; + *ctrl_flags = cpu_to_be64(fctx->enc.enc_ctrl.flags); + offset_control = (__be64 *)&rctx->control_word; *offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16)); /* Storing Packet Data Information in offset * Control Word First 8 bytes diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index e343249c8d05..3878b01e19e1 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -4,6 +4,7 @@ */ #include "cptvf.h" +#include "cptvf_algs.h" #include "request_manager.h" /** @@ -173,11 +174,10 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, goto scatter_gather_clean; } - ((u16 *)info->in_buffer)[0] = req->outcnt; - ((u16 *)info->in_buffer)[1] = req->incnt; - ((u16 *)info->in_buffer)[2] = 0; - ((u16 *)info->in_buffer)[3] = 0; - *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer); + ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt); + ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt); + ((__be16 *)info->in_buffer)[2] = 0; + ((__be16 *)info->in_buffer)[3] = 0; memcpy(&info->in_buffer[8], info->gather_components, g_sz_bytes); @@ -470,8 +470,6 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2); vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen); - /* 64-bit swap for microcode data reads, not needed for addresses*/ - vq_cmd.cmd.u64 = cpu_to_be64(vq_cmd.cmd.u64); vq_cmd.dptr = info->dptr_baddr; vq_cmd.rptr = info->rptr_baddr; vq_cmd.cptr.u64 = 0; diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h index 1e8dd9ebcc17..8d40e4ba3af1 100644 --- a/drivers/crypto/cavium/cpt/request_manager.h +++ b/drivers/crypto/cavium/cpt/request_manager.h @@ -75,16 +75,16 @@ struct sglist_component { union { u64 len; struct { - u16 len0; - u16 len1; - u16 len2; - u16 len3; + __be16 len0; + __be16 len1; + __be16 len2; + __be16 len3; } s; } u; - u64 ptr0; - u64 ptr1; - u64 ptr2; - u64 ptr3; + __be64 ptr0; + __be64 ptr1; + __be64 ptr2; + __be64 ptr3; }; struct cpt_info_buffer { @@ -114,10 +114,10 @@ struct 
cpt_info_buffer { union vq_cmd_word0 { u64 u64; struct { - u16 opcode; - u16 param1; - u16 param2; - u16 dlen; + __be16 opcode; + __be16 param1; + __be16 param2; + __be16 dlen; } s; }; From 57c8aa43b9f272c382c253573c82be5cb68fe22d Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Fri, 26 Jun 2020 14:09:39 -0500 Subject: [PATCH 041/157] crypto: ccp - Update CCP driver maintainer information Add John Allen as a new CCP driver maintainer. Additionally, break out the driver SEV support and create a new maintainer entry, with Brijesh Singh and Tom Lendacky as maintainers. Cc: John Allen Cc: Brijesh Singh Signed-off-by: Tom Lendacky Acked-by: John Allen Acked-by: Brijesh Singh Signed-off-by: Herbert Xu --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 68f21d46614c..de266ca5f921 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -830,11 +830,20 @@ F: include/uapi/rdma/efa-abi.h AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER M: Tom Lendacky +M: John Allen L: linux-crypto@vger.kernel.org S: Supported F: drivers/crypto/ccp/ F: include/linux/ccp.h +AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER - SEV SUPPORT +M: Brijesh Singh +M: Tom Lendacky +L: linux-crypto@vger.kernel.org +S: Supported +F: drivers/crypto/ccp/sev* +F: include/uapi/linux/psp-sev.h + AMD DISPLAY CORE M: Harry Wentland M: Leo Li From 3cfa435c69f43be12c333744f3af29e167b554f3 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 29 Jun 2020 13:30:03 +0100 Subject: [PATCH 042/157] crypto: ux500/hash - Add namespacing to hash_init() A recent change to the Regulator consumer API (which this driver utilises) adds prototypes for some suspend functions. These functions require including the header file include/linux/suspend.h. The following tree of includes affecting this driver will be present: In file included from include/linux/elevator.h:6, from include/linux/blkdev.h:288, from include/linux/blk-cgroup.h:23, from include/linux/writeback.h:14, from include/linux/memcontrol.h:22, from include/linux/swap.h:9, from include/linux/suspend.h:5, from include/linux/regulator/consumer.h:35, from drivers/crypto/ux500/hash/hash_core.c:28: include/linux/elevator.h pulls in include/linux/hashtable.h which contains its own version of hash_init(). This confuses the compiler and results in the following error (amongst others): drivers/crypto/ux500/hash/hash_core.c:1362:19: error: passing argument 1 of '__hash_init' from incompatible pointer type [-Werror=incompatible-pointer-types] 1362 | return hash_init(req); Fix this by namespacing the local hash_init() such that the source of confusion is removed. Cc: Linus Walleij Cc: David S. Miller Cc: linux-crypto@vger.kernel.org Signed-off-by: Lee Jones Signed-off-by: Herbert Xu --- drivers/crypto/ux500/hash/hash_core.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index c24f2db8d5e8..a5ee8c2fb4e0 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -545,7 +545,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize) * * Initialize structures.
*/ -static int hash_init(struct ahash_request *req) +static int ux500_hash_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); @@ -1359,7 +1359,7 @@ static int ahash_sha1_init(struct ahash_request *req) ctx->config.oper_mode = HASH_OPER_MODE_HASH; ctx->digestsize = SHA1_DIGEST_SIZE; - return hash_init(req); + return ux500_hash_init(req); } static int ahash_sha256_init(struct ahash_request *req) @@ -1372,7 +1372,7 @@ static int ahash_sha256_init(struct ahash_request *req) ctx->config.oper_mode = HASH_OPER_MODE_HASH; ctx->digestsize = SHA256_DIGEST_SIZE; - return hash_init(req); + return ux500_hash_init(req); } static int ahash_sha1_digest(struct ahash_request *req) @@ -1425,7 +1425,7 @@ static int hmac_sha1_init(struct ahash_request *req) ctx->config.oper_mode = HASH_OPER_MODE_HMAC; ctx->digestsize = SHA1_DIGEST_SIZE; - return hash_init(req); + return ux500_hash_init(req); } static int hmac_sha256_init(struct ahash_request *req) @@ -1438,7 +1438,7 @@ static int hmac_sha256_init(struct ahash_request *req) ctx->config.oper_mode = HASH_OPER_MODE_HMAC; ctx->digestsize = SHA256_DIGEST_SIZE; - return hash_init(req); + return ux500_hash_init(req); } static int hmac_sha1_digest(struct ahash_request *req) @@ -1515,7 +1515,7 @@ static struct hash_algo_template hash_algs[] = { .conf.algorithm = HASH_ALGO_SHA1, .conf.oper_mode = HASH_OPER_MODE_HASH, .hash = { - .init = hash_init, + .init = ux500_hash_init, .update = ahash_update, .final = ahash_final, .digest = ahash_sha1_digest, @@ -1538,7 +1538,7 @@ static struct hash_algo_template hash_algs[] = { .conf.algorithm = HASH_ALGO_SHA256, .conf.oper_mode = HASH_OPER_MODE_HASH, .hash = { - .init = hash_init, + .init = ux500_hash_init, .update = ahash_update, .final = ahash_final, .digest = ahash_sha256_digest, @@ -1561,7 +1561,7 @@ static struct hash_algo_template hash_algs[] = { .conf.algorithm = HASH_ALGO_SHA1, .conf.oper_mode = HASH_OPER_MODE_HMAC, .hash = { - .init = hash_init, + .init = ux500_hash_init, .update = ahash_update, .final = ahash_final, .digest = hmac_sha1_digest, @@ -1585,7 +1585,7 @@ static struct hash_algo_template hash_algs[] = { .conf.algorithm = HASH_ALGO_SHA256, .conf.oper_mode = HASH_OPER_MODE_HMAC, .hash = { - .init = hash_init, + .init = ux500_hash_init, .update = ahash_update, .final = ahash_final, .digest = hmac_sha256_digest, From 9a150af05f6f73186e0017a37104b5eb061ee627 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 29 Jun 2020 18:03:53 +1000 Subject: [PATCH 043/157] hwrng: npcm - Fix W=1 unused variable warning This patch fixes an unused variable warning when this driver is built-in with CONFIG_OF=n. 
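The same one-line pattern recurs in this and the following hwrng patches; sketched here once with a hypothetical table name and compatible string: with CONFIG_OF=n nothing references the table (of_match_ptr() compiles to NULL), and __maybe_unused silences the W=1 warning without resorting to an #ifdef.

#include <linux/mod_devicetable.h>
#include <linux/of.h>

static const struct of_device_id demo_rng_dt_ids[] __maybe_unused = {
	{ .compatible = "vendor,demo-rng" },	/* hypothetical */
	{ /* sentinel */ }
};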
Reported-by: kernel test robot Signed-off-by: Herbert Xu --- drivers/char/hw_random/npcm-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c index 01d04404d8c0..5d0d13f891b7 100644 --- a/drivers/char/hw_random/npcm-rng.c +++ b/drivers/char/hw_random/npcm-rng.c @@ -161,7 +161,7 @@ static const struct dev_pm_ops npcm_rng_pm_ops = { pm_runtime_force_resume) }; -static const struct of_device_id rng_dt_id[] = { +static const struct of_device_id rng_dt_id[] __maybe_unused = { { .compatible = "nuvoton,npcm750-rng", }, {}, }; From 414a3c1b811da4ba4ebe441f3e69a17c053196a2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 29 Jun 2020 18:03:55 +1000 Subject: [PATCH 044/157] hwrng: omap - Fix W=1 unused variable warning This patch fixes an unused variable warning when this driver is built-in with CONFIG_OF=n. While we're at it this patch also expands the compiler coverage when CONFIG_OF is off by removing all the CONFIG_OF ifdefs. Signed-off-by: Herbert Xu --- drivers/char/hw_random/omap-rng.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 7290c603fcb8..5cc5fc504968 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -243,7 +244,6 @@ static struct omap_rng_pdata omap2_rng_pdata = { .cleanup = omap2_rng_cleanup, }; -#if defined(CONFIG_OF) static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv) { return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY; @@ -358,7 +358,7 @@ static struct omap_rng_pdata eip76_rng_pdata = { .cleanup = omap4_rng_cleanup, }; -static const struct of_device_id omap_rng_of_match[] = { +static const struct of_device_id omap_rng_of_match[] __maybe_unused = { { .compatible = "ti,omap2-rng", .data = &omap2_rng_pdata, @@ -418,13 +418,6 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, } return 0; } -#else -static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng, - struct platform_device *pdev) -{ - return -EINVAL; -} -#endif static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng) { From eb515c60fac744c3e5bc16cbc4f2e0439b0a8b03 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 29 Jun 2020 18:03:57 +1000 Subject: [PATCH 045/157] hwrng: hisi - Fix W=1 unused variable warning This patch fixes an unused variable warning when this driver is built-in with CONFIG_OF=n. Signed-off-by: Herbert Xu --- drivers/char/hw_random/hisi-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c index 6815e17a9834..96438f85cafa 100644 --- a/drivers/char/hw_random/hisi-rng.c +++ b/drivers/char/hw_random/hisi-rng.c @@ -99,7 +99,7 @@ static int hisi_rng_probe(struct platform_device *pdev) return 0; } -static const struct of_device_id hisi_rng_dt_ids[] = { +static const struct of_device_id hisi_rng_dt_ids[] __maybe_unused = { { .compatible = "hisilicon,hip04-rng" }, { .compatible = "hisilicon,hip05-rng" }, { } From 64f8153a2f36d2eb82fc6e4d7eee3b08ef10371c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 29 Jun 2020 18:04:00 +1000 Subject: [PATCH 046/157] hwrng: bcm2835 - Fix W=1 unused variable warning This patch fixes an unused variable warning when this driver is built with CONFIG_OF=n. 
Signed-off-by: Herbert Xu --- drivers/char/hw_random/bcm2835-rng.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index cbf5eaea662c..eec26329d1ea 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -139,7 +139,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev) { const struct bcm2835_rng_of_data *of_data; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; const struct of_device_id *rng_id; struct bcm2835_rng_priv *priv; int err; @@ -166,7 +165,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev) priv->rng.cleanup = bcm2835_rng_cleanup; if (dev_of_node(dev)) { - rng_id = of_match_node(bcm2835_rng_of_match, np); + rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node); if (!rng_id) return -EINVAL; From ad23756271d5744a0a0ba556f8aaa70e358d5aa6 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 29 Jun 2020 18:04:02 +1000 Subject: [PATCH 047/157] hwrng: st - Fix W=1 unused variable warning This patch fixes an unused variable warning when this driver is built-in with CONFIG_OF=n. Signed-off-by: Herbert Xu --- drivers/char/hw_random/st-rng.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c index 783c24e3f8b7..15ba1e6fae4d 100644 --- a/drivers/char/hw_random/st-rng.c +++ b/drivers/char/hw_random/st-rng.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -121,7 +122,7 @@ static int st_rng_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id st_rng_match[] = { +static const struct of_device_id st_rng_match[] __maybe_unused = { { .compatible = "st,rng" }, {}, }; From e05231a53b7f85793983e36c187aa2494649f4ad Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 29 Jun 2020 18:04:04 +1000 Subject: [PATCH 048/157] hwrng: pic32 - Fix W=1 unused variable warning This patch fixes an unused variable warning when this driver is built-in with CONFIG_OF=n. Signed-off-by: Herbert Xu --- drivers/char/hw_random/pic32-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index 81080cb2294e..e8210c1715cf 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c @@ -119,7 +119,7 @@ static int pic32_rng_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id pic32_rng_of_match[] = { +static const struct of_device_id pic32_rng_of_match[] __maybe_unused = { { .compatible = "microchip,pic32mzda-rng", }, { /* sentinel */ } }; From 74da405803c64dd6601a822b4c3e7b4ee7816058 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 29 Jun 2020 18:04:07 +1000 Subject: [PATCH 049/157] hwrng: octeon - Fix sparse warnings This patch fixes a bunch of sparse warnings by adding __force tags when casting __iomem pointers to u64. Signed-off-by: Herbert Xu --- drivers/char/hw_random/octeon-rng.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c index 7be8067ac4e8..8561a09b4681 100644 --- a/drivers/char/hw_random/octeon-rng.c +++ b/drivers/char/hw_random/octeon-rng.c @@ -33,7 +33,7 @@ static int octeon_rng_init(struct hwrng *rng) ctl.u64 = 0; ctl.s.ent_en = 1; /* Enable the entropy source. */ ctl.s.rng_en = 1; /* Enable the RNG hardware.
*/ - cvmx_write_csr((u64)p->control_status, ctl.u64); + cvmx_write_csr((__force u64)p->control_status, ctl.u64); return 0; } @@ -44,14 +44,14 @@ static void octeon_rng_cleanup(struct hwrng *rng) ctl.u64 = 0; /* Disable everything. */ - cvmx_write_csr((u64)p->control_status, ctl.u64); + cvmx_write_csr((__force u64)p->control_status, ctl.u64); } static int octeon_rng_data_read(struct hwrng *rng, u32 *data) { struct octeon_rng *p = container_of(rng, struct octeon_rng, ops); - *data = cvmx_read64_uint32((u64)p->result); + *data = cvmx_read64_uint32((__force u64)p->result); return sizeof(u32); } From 528f776df67c440361b2847b4da400d8754bf030 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 29 Jun 2020 18:16:17 +0100 Subject: [PATCH 050/157] crypto: qat - allow xts requests not multiple of block Allow AES-XTS requests that are not a multiple of the block size. If a request is smaller than the block size, return -EINVAL. This fixes the following issue reported by the crypto testmgr self-test: alg: skcipher: qat_aes_xts encryption failed on test vector "random: len=116 klen=64"; expected_error=0, actual_error=-22, cfg="random: inplace may_sleep use_finup src_divs=[45.85%@+4077, 54.15%@alignmask+18]" Fixes: 96ee111a659e ("crypto: qat - return error for block...") Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_algs.c | 22 ++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 833d1a75666f..6bea6f868395 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "adf_accel_devices.h" #include "adf_transport.h" @@ -1058,6 +1059,14 @@ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req) return qat_alg_skcipher_encrypt(req); } +static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req) +{ + if (req->cryptlen < XTS_BLOCK_SIZE) + return -EINVAL; + + return qat_alg_skcipher_encrypt(req); +} + static int qat_alg_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); @@ -1117,6 +1126,15 @@ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req) return qat_alg_skcipher_decrypt(req); } + +static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req) +{ + if (req->cryptlen < XTS_BLOCK_SIZE) + return -EINVAL; + + return qat_alg_skcipher_decrypt(req); +} + static int qat_alg_aead_init(struct crypto_aead *tfm, enum icp_qat_hw_auth_algo hash, const char *hash_name) @@ -1310,8 +1328,8 @@ static struct skcipher_alg qat_skciphers[] = { { .init = qat_alg_skcipher_init_tfm, .exit = qat_alg_skcipher_exit_tfm, .setkey = qat_alg_skcipher_xts_setkey, - .decrypt = qat_alg_skcipher_blk_decrypt, - .encrypt = qat_alg_skcipher_blk_encrypt, + .decrypt = qat_alg_skcipher_xts_decrypt, + .encrypt = qat_alg_skcipher_xts_encrypt, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, From b185a68710e074eeaffb595a31f9bb617c2aa890 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 29 Jun 2020 18:16:18 +0100 Subject: [PATCH 051/157] crypto: qat - validate xts key Validate the AES-XTS key using the function xts_verify_key() to reject malformed keys.
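For context, a hedged sketch of where the new check sits (condensed from qat_alg_skcipher_xts_setkey() in the diff below; the helper name is hypothetical): xts_verify_key() rejects odd-length keys and, in FIPS mode, keys whose two halves are identical, before any hardware setup takes place.

#include <crypto/skcipher.h>
#include <crypto/xts.h>

static int xts_setkey_sketch(struct crypto_skcipher *tfm,
			     const u8 *key, unsigned int keylen)
{
	int ret;

	ret = xts_verify_key(tfm, key, keylen);	/* generic sanity check */
	if (ret)
		return ret;

	/* ... split key1/key2 (keylen / 2 bytes each) and program them ... */
	return 0;
}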
Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_algs.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 6bea6f868395..11f36eafda0c 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -995,6 +995,12 @@ static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm, static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { + int ret; + + ret = xts_verify_key(tfm, key, keylen); + if (ret) + return ret; + return qat_alg_skcipher_setkey(tfm, key, keylen, ICP_QAT_HW_CIPHER_XTS_MODE); } From 5fb8b70d206fe25c35abc2256bfc0d4e64f1af4d Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 29 Jun 2020 18:16:19 +0100 Subject: [PATCH 052/157] crypto: qat - remove unused field in skcipher ctx Remove tfm field in qat_alg_skcipher_ctx structure. This is not used. Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_algs.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 11f36eafda0c..77bdff0118f7 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -88,7 +88,6 @@ struct qat_alg_skcipher_ctx { struct icp_qat_fw_la_bulk_req enc_fw_req; struct icp_qat_fw_la_bulk_req dec_fw_req; struct qat_crypto_instance *inst; - struct crypto_skcipher *tfm; }; static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) @@ -1197,10 +1196,7 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm) static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm) { - struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request)); - ctx->tfm = tfm; return 0; } From a85211f36f3d637cd9c8e347ae5d37b72837dafb Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 29 Jun 2020 18:16:20 +0100 Subject: [PATCH 053/157] crypto: qat - fallback for xts with 192 bit keys Forward requests to another provider if the key length for AES-XTS is 192 bits as this is not supported by the QAT accelerators. 
This fixes the following issue reported with the option CONFIG_CRYPTO_MANAGER_EXTRA_TESTS: alg: skcipher: qat_aes_xts setkey failed on test vector "random: len=3204 klen=48"; expected_error=0, actual_error=-22, flags=0x1 Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_algs.c | 68 ++++++++++++++++++++++-- 1 file changed, 65 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 77bdff0118f7..2a353e249e1c 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -88,6 +88,8 @@ struct qat_alg_skcipher_ctx { struct icp_qat_fw_la_bulk_req enc_fw_req; struct icp_qat_fw_la_bulk_req dec_fw_req; struct qat_crypto_instance *inst; + struct crypto_skcipher *ftfm; + bool fallback; }; static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) @@ -994,12 +996,25 @@ static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm, static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; ret = xts_verify_key(tfm, key, keylen); if (ret) return ret; + if (keylen >> 1 == AES_KEYSIZE_192) { + ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen); + if (ret) + return ret; + + ctx->fallback = true; + + return 0; + } + + ctx->fallback = false; + return qat_alg_skcipher_setkey(tfm, key, keylen, ICP_QAT_HW_CIPHER_XTS_MODE); } @@ -1066,9 +1081,19 @@ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req) static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req) { + struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm); + struct skcipher_request *nreq = skcipher_request_ctx(req); + if (req->cryptlen < XTS_BLOCK_SIZE) return -EINVAL; + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + skcipher_request_set_tfm(nreq, ctx->ftfm); + return crypto_skcipher_encrypt(nreq); + } + return qat_alg_skcipher_encrypt(req); } @@ -1134,9 +1159,19 @@ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req) static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req) { + struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm); + struct skcipher_request *nreq = skcipher_request_ctx(req); + if (req->cryptlen < XTS_BLOCK_SIZE) return -EINVAL; + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + skcipher_request_set_tfm(nreq, ctx->ftfm); + return crypto_skcipher_decrypt(nreq); + } + return qat_alg_skcipher_decrypt(req); } @@ -1200,6 +1235,24 @@ static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm) return 0; } +static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm) +{ + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + int reqsize; + + ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->ftfm)) + return PTR_ERR(ctx->ftfm); + + reqsize = max(sizeof(struct qat_crypto_request), + sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(ctx->ftfm)); + crypto_skcipher_set_reqsize(tfm, reqsize); + + return 0; +} + static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm) { struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -1227,6 +1280,15 @@ static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm) qat_crypto_put_instance(inst); } 
+static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm) +{ + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (ctx->ftfm) + crypto_free_skcipher(ctx->ftfm); + + qat_alg_skcipher_exit_tfm(tfm); +} static struct aead_alg qat_aeads[] = { { .base = { @@ -1321,14 +1383,14 @@ static struct skcipher_alg qat_skciphers[] = { { .base.cra_name = "xts(aes)", .base.cra_driver_name = "qat_aes_xts", .base.cra_priority = 4001, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, - .init = qat_alg_skcipher_init_tfm, - .exit = qat_alg_skcipher_exit_tfm, + .init = qat_alg_skcipher_init_xts_tfm, + .exit = qat_alg_skcipher_exit_xts_tfm, .setkey = qat_alg_skcipher_xts_setkey, .decrypt = qat_alg_skcipher_xts_decrypt, .encrypt = qat_alg_skcipher_xts_encrypt, From 1186b429fc0124828d48148b8b261322e22b5248 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 30 Jun 2020 14:20:37 +1000 Subject: [PATCH 054/157] hwrng: ba431 - Add dependency on HAS_IOMEM The ba431 driver depends on HAS_IOMEM and this was missing from the Kconfig file. Reported-by: kernel test robot Fixes: 0289e9be5dc2 ("hwrng: ba431 - add support for BA431 hwrng") Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 7b876dfdbaaf..edbaf6154764 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -76,6 +76,7 @@ config HW_RANDOM_ATMEL config HW_RANDOM_BA431 tristate "Silex Insight BA431 Random Number Generator support" + depends on HAS_IOMEM default HW_RANDOM help This driver provides kernel-side support for the Random Number From e4ca592c4d5289838d7737defe749802f08f0eac Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 30 Jun 2020 15:39:41 +0200 Subject: [PATCH 055/157] hwrng: ba431 - HW_RANDOM_BA431 should not default to y As HW_RANDOM_BA431 does not have any platform dependency, it should not default to enabled. Fixes: 0289e9be5dc26d84 ("hwrng: ba431 - add support for BA431 hwrng") Signed-off-by: Geert Uytterhoeven Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index edbaf6154764..8478eb757f3c 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -77,7 +77,6 @@ config HW_RANDOM_ATMEL config HW_RANDOM_BA431 tristate "Silex Insight BA431 Random Number Generator support" depends on HAS_IOMEM - default HW_RANDOM help This driver provides kernel-side support for the Random Number Generator hardware based on Silex Insight BA431 IP. @@ -85,8 +84,6 @@ config HW_RANDOM_BA431 To compile this driver as a module, choose M here: the module will be called ba431-rng. - If unsure, say Y. - config HW_RANDOM_BCM2835 tristate "Broadcom BCM2835/BCM63xx Random Number Generator support" depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \ From d4f6d923238dbdb69b8525e043fedef2670d4f2c Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Wed, 1 Jul 2020 22:09:46 +0200 Subject: [PATCH 056/157] hwrng: bcm2835 - Constify bcm2835_rng_devtype[] bcm2835_rng_devtype[] is not modified and can be made const to allow the compiler to put it in read-only memory. 
Before: text data bss dec hex filename 2392 176 0 2568 a08 drivers/char/hw_random/bcm2835-rng.o After: text data bss dec hex filename 2464 104 0 2568 a08 drivers/char/hw_random/bcm2835-rng.o Signed-off-by: Rikard Falkeborn Acked-by: Florian Fainelli Signed-off-by: Herbert Xu --- drivers/char/hw_random/bcm2835-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index eec26329d1ea..1a7c43b43c6b 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -187,7 +187,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev) MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); -static struct platform_device_id bcm2835_rng_devtype[] = { +static const struct platform_device_id bcm2835_rng_devtype[] = { { .name = "bcm2835-rng" }, { .name = "bcm63xx-rng" }, { /* sentinel */ } From b242973f0fae47563917c7bd2ac6bf2b72cdafe8 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Wed, 1 Jul 2020 22:09:47 +0200 Subject: [PATCH 057/157] hwrng: nomadik - Constify nmk_rng_ids[] nmk_rng_ids[] is not modified and can be made const to allow the compiler to put it in read-only memory. Before: text data bss dec hex filename 652 216 4 872 368 drivers/char/hw_random/nomadik-rng.o After: text data bss dec hex filename 676 192 4 872 368 drivers/char/hw_random/nomadik-rng.o Signed-off-by: Rikard Falkeborn Acked-by: Arnd Bergmann Signed-off-by: Herbert Xu --- drivers/char/hw_random/nomadik-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index 74ed29f42e4f..b0ded41eb865 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -76,7 +76,7 @@ static int nmk_rng_remove(struct amba_device *dev) return 0; } -static struct amba_id nmk_rng_ids[] = { +static const struct amba_id nmk_rng_ids[] = { { .id = 0x000805e1, .mask = 0x000fffff, /* top bits are rev and cfg: accept all */ From a024d70ae29825b117f2c80c53345053f074cdc1 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Wed, 1 Jul 2020 22:09:48 +0200 Subject: [PATCH 058/157] hwrng: virtio - Constify id_table[] id_table[] is not modified and can be made const to allow the compiler to put it in read-only memory. Before: text data bss dec hex filename 1746 192 8 1946 79a drivers/char/hw_random/virtio-rng.o After: text data bss dec hex filename 1762 176 8 1946 79a drivers/char/hw_random/virtio-rng.o Signed-off-by: Rikard Falkeborn Signed-off-by: Herbert Xu --- drivers/char/hw_random/virtio-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 79a6e47b5fbc..a90001e02bf7 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -195,7 +195,7 @@ static int virtrng_restore(struct virtio_device *vdev) } #endif -static struct virtio_device_id id_table[] = { +static const struct virtio_device_id id_table[] = { { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID }, { 0 }, }; From ad6a0664c29c698bfe5c46c49f627af6f5bb172b Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Wed, 1 Jul 2020 22:29:36 +0200 Subject: [PATCH 059/157] crypto: virtio - constify features[] and id_table[] features[] and id_table[] are not modified and can be made const to allow the compiler to put them in read-only memory.
Before: text data bss dec hex filename 11534 2056 160 13750 35b6 drivers/crypto/virtio/virtio_crypto_core.o After: text data bss dec hex filename 11630 1992 128 13750 35b6 drivers/crypto/virtio/virtio_crypto_core.o Signed-off-by: Rikard Falkeborn Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index c8a962c62663..77e744eaedd0 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -498,11 +498,11 @@ free_vqs: } #endif -static unsigned int features[] = { +static const unsigned int features[] = { /* none */ }; -static struct virtio_device_id id_table[] = { +static const struct virtio_device_id id_table[] = { { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID }, { 0 }, }; From 662bb52f50bca16a74fe92b487a14d7dccb85e1a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 2 Jul 2020 13:32:21 +1000 Subject: [PATCH 060/157] crypto: af_alg - Fix regression on empty requests Some user-space programs rely on crypto requests that have no control metadata. This broke when a check was added to require the presence of control metadata with the ctx->init flag. This patch fixes the regression by setting ctx->init as long as one sendmsg(2) has been made, with or without a control message. Reported-by: Sachin Sant Reported-by: Naresh Kamboju Fixes: f3c802a1f300 ("crypto: algif_aead - Only wake up when...") Signed-off-by: Herbert Xu --- crypto/af_alg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 9fcb91ea10c4..5882ed46f1ad 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -851,6 +851,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, err = -EINVAL; goto unlock; } + ctx->init = true; if (init) { ctx->enc = enc; @@ -858,7 +859,6 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, memcpy(ctx->iv, con.iv->iv, ivsize); ctx->aead_assoclen = con.aead_assoclen; - ctx->init = true; } while (size) { From d9dd5ef3d32b20b5568a28c195dec8b1a20902cf Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 3 Jul 2020 14:46:52 +1000 Subject: [PATCH 061/157] crypto: ccp - Fix sparse warnings This patch fixes a number of endianness marking issues in the ccp driver. 
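For readers unfamiliar with sparse's endianness checking, the discipline the patch restores is this: __le32/__be64 are bitwise-annotated types that mean "this memory is always little/big endian", plain u32 is CPU-endian, and cpu_to_le32()/le32_to_cpu() and friends are the only sanctioned crossings, so building with "make C=1" flags any bare assignment between the two. A minimal sketch with invented names (not the ccp code):

    #include <linux/types.h>

    struct hw_desc {
    	__le32 len;	/* always little-endian in device memory */
    };

    static void fill_desc(struct hw_desc *d, u32 len)
    {
    	d->len = cpu_to_le32(len);	/* explicit conversion: ok */
    	/* d->len = len; would draw a sparse endianness warning  */
    }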
Signed-off-by: Herbert Xu Acked-by: John Allen Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v5.c | 8 ++++---- drivers/crypto/ccp/ccp-dev.h | 12 ++++++------ drivers/crypto/ccp/ccp-ops.c | 5 ++--- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 82ac4c14c04c..7838f63bab32 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -221,8 +221,8 @@ static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) static int ccp5_do_cmd(struct ccp5_desc *desc, struct ccp_cmd_queue *cmd_q) { - u32 *mP; - __le32 *dP; + __le32 *mP; + u32 *dP; u32 tail; int i; int ret = 0; @@ -235,8 +235,8 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, } mutex_lock(&cmd_q->q_mutex); - mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; - dP = (__le32 *) desc; + mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx]; + dP = (u32 *)desc; for (i = 0; i < 8; i++) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 87a34d91fdf7..a5d9123a22ea 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -597,8 +597,8 @@ struct dword3 { }; union dword4 { - __le32 dst_lo; /* NON-SHA */ - __le32 sha_len_lo; /* SHA */ + u32 dst_lo; /* NON-SHA */ + u32 sha_len_lo; /* SHA */ }; union dword5 { @@ -608,7 +608,7 @@ union dword5 { unsigned int rsvd1:13; unsigned int fixed:1; } fields; - __le32 sha_len_hi; + u32 sha_len_hi; }; struct dword7 { @@ -619,12 +619,12 @@ struct dword7 { struct ccp5_desc { struct dword0 dw0; - __le32 length; - __le32 src_lo; + u32 length; + u32 src_lo; struct dword3 dw3; union dword4 dw4; union dword5 dw5; - __le32 key_lo; + u32 key_lo; struct dword7 dw7; }; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index a06d20263efa..bd270e66185e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -632,13 +632,12 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) struct ccp_data src, dst; struct ccp_data aad; struct ccp_op op; - - unsigned long long *final; unsigned int dm_offset; unsigned int authsize; unsigned int jobid; unsigned int ilen; bool in_place = true; /* Default value */ + __be64 *final; int ret; struct scatterlist *p_inp, sg_inp[2]; @@ -840,7 +839,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) DMA_BIDIRECTIONAL); if (ret) goto e_dst; - final = (unsigned long long *) final_wa.address; + final = (__be64 *)final_wa.address; final[0] = cpu_to_be64(aes->aad_len * 8); final[1] = cpu_to_be64(ilen * 8); From 3347c8a079d67af21760a78cc5f2abbcf06d9571 Mon Sep 17 00:00:00 2001 From: Sedat Dilek Date: Fri, 3 Jul 2020 16:32:06 +0200 Subject: [PATCH 062/157] crypto: aesni - Fix build with LLVM_IAS=1 Building with LLVM_IAS=1, i.e. using Clang's integrated assembler (IAS) from LLVM/Clang >= v10.0.1-rc1 instead of GNU as from GNU binutils, I see the following breakage on Debian/testing AMD64: :15:74: error: too many positional arguments PRECOMPUTE 8*3+8(%rsp), %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, ^ arch/x86/crypto/aesni-intel_asm.S:1598:2: note: while in macro instantiation GCM_INIT %r9, 8*3 +8(%rsp), 8*3 +16(%rsp), 8*3 +24(%rsp) ^ :47:2: error: unknown use of instruction mnemonic without a size suffix GHASH_4_ENCRYPT_4_PARALLEL_dec %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc ^ arch/x86/crypto/aesni-intel_asm.S:1599:2: note: while
in macro instantiation GCM_ENC_DEC dec ^ :15:74: error: too many positional arguments PRECOMPUTE 8*3+8(%rsp), %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, ^ arch/x86/crypto/aesni-intel_asm.S:1686:2: note: while in macro instantiation GCM_INIT %r9, 8*3 +8(%rsp), 8*3 +16(%rsp), 8*3 +24(%rsp) ^ :47:2: error: unknown use of instruction mnemonic without a size suffix GHASH_4_ENCRYPT_4_PARALLEL_enc %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc ^ arch/x86/crypto/aesni-intel_asm.S:1687:2: note: while in macro instantiation GCM_ENC_DEC enc Craig Topper suggested to me in ClangBuiltLinux issue #1050: > I think the "too many positional arguments" is because the parser isn't able > to handle the trailing commas. > > The "unknown use of instruction mnemonic" is because the macro was named > GHASH_4_ENCRYPT_4_PARALLEL_DEC but its being instantiated with > GHASH_4_ENCRYPT_4_PARALLEL_dec I guess gas ignores case on the > macro instantiation, but llvm doesn't. First, I removed the trailing comma in the PRECOMPUTE line. Second, I substituted: 1. GHASH_4_ENCRYPT_4_PARALLEL_DEC -> GHASH_4_ENCRYPT_4_PARALLEL_dec 2. GHASH_4_ENCRYPT_4_PARALLEL_ENC -> GHASH_4_ENCRYPT_4_PARALLEL_enc With these changes I was able to build with LLVM_IAS=1 and boot on bare metal. I confirmed that this works with Linux kernel v5.7.5 final. NOTE: This patch is on top of Linux v5.7 final. Thanks to Craig and especially Nick for double-checking and their comments. Suggested-by: Craig Topper Suggested-by: Craig Topper Suggested-by: Nick Desaulniers Reviewed-by: Nick Desaulniers Cc: "ClangBuiltLinux" Link: https://github.com/ClangBuiltLinux/linux/issues/1050 Link: https://bugs.llvm.org/show_bug.cgi?id=24494 Signed-off-by: Sedat Dilek Signed-off-by: Herbert Xu --- arch/x86/crypto/aesni-intel_asm.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 54e7d15dbd0d..7d4298e6d4cb 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -266,7 +266,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff PSHUFB_XMM %xmm2, %xmm0 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv - PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7 movdqu HashKey(%arg2), %xmm13 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ @@ -978,7 +978,7 @@ _initial_blocks_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation +.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 @@ -1186,7 +1186,7 @@ aes_loop_par_enc_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation +.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 From 7bc13b5b60e9412a7ddef300ce2c661eecd1fd5d Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sun, 5 Jul 2020 21:18:58 +1200 Subject: [PATCH 063/157] crypto: api - permit users to specify numa node of acomp hardware For a Linux server with NUMA, there are possibly multiple (de)compressors which are either local or remote to some NUMA node.
Some drivers will automatically use the (de)compressor near the CPU calling acomp_alloc(). However, that is not necessarily correct, because the user submitting an acomp_req may be on a different NUMA node from the CPU that allocated the acomp. Just as the kernel has both kmalloc() and kmalloc_node(), crypto can offer the same support. Cc: Seth Jennings Cc: Dan Streetman Cc: Vitaly Wool Cc: Andrew Morton Cc: Jonathan Cameron Signed-off-by: Barry Song Signed-off-by: Herbert Xu --- crypto/acompress.c | 8 ++++++++ crypto/api.c | 24 +++++++++++++++--------- crypto/internal.h | 23 +++++++++++++++++++---- include/crypto/acompress.h | 18 ++++++++++++++++++ include/linux/crypto.h | 2 ++ 5 files changed, 62 insertions(+), 13 deletions(-) diff --git a/crypto/acompress.c b/crypto/acompress.c index 84a76723e851..c32c72048a1c 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -109,6 +109,14 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_acomp); +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node) +{ + return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask, + node); +} +EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node); + struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) { struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); diff --git a/crypto/api.c b/crypto/api.c index edcf690800d4..5d8fe60b36c1 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -433,8 +433,9 @@ err: } EXPORT_SYMBOL_GPL(crypto_alloc_base); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend) +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, + int node) { char *mem; struct crypto_tfm *tfm = NULL; @@ -445,12 +446,13 @@ void *crypto_create_tfm(struct crypto_alg *alg, tfmsize = frontend->tfmsize; total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); - mem = kzalloc(total, GFP_KERNEL); + mem = kzalloc_node(total, GFP_KERNEL, node); if (mem == NULL) goto out_err; tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; + tfm->node = node; err = frontend->init_tfm(tfm); if (err) @@ -472,7 +474,7 @@ out_err: out: return mem; } -EXPORT_SYMBOL_GPL(crypto_create_tfm); +EXPORT_SYMBOL_GPL(crypto_create_tfm_node); struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, @@ -490,11 +492,13 @@ struct crypto_alg *crypto_find_alg(const char *alg_name, EXPORT_SYMBOL_GPL(crypto_find_alg); /* - * crypto_alloc_tfm - Locate algorithm and allocate transform + * crypto_alloc_tfm_node - Locate algorithm and allocate transform * @alg_name: Name of algorithm * @frontend: Frontend algorithm type * @type: Type of algorithm * @mask: Mask for type comparison + * @node: NUMA node in which users desire to put requests, if node is + * NUMA_NO_NODE, it means users have no special requirement. * * crypto_alloc_tfm() will first attempt to locate an already loaded * algorithm. If that fails and the kernel supports dynamically loadable @@ -509,8 +513,10 @@ EXPORT_SYMBOL_GPL(crypto_find_alg); * * In case of error the return value is an error pointer.
*/ -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask) + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node) { void *tfm; int err; @@ -524,7 +530,7 @@ void *crypto_alloc_tfm(const char *alg_name, goto err; } - tfm = crypto_create_tfm(alg, frontend); + tfm = crypto_create_tfm_node(alg, frontend, node); if (!IS_ERR(tfm)) return tfm; @@ -542,7 +548,7 @@ err: return ERR_PTR(err); } -EXPORT_SYMBOL_GPL(crypto_alloc_tfm); +EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node); /* * crypto_destroy_tfm - Free crypto transform diff --git a/crypto/internal.h b/crypto/internal.h index ff06a3bd1ca1..1b92a5a61852 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -68,13 +68,28 @@ void crypto_remove_final(struct list_head *list); void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend); +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, int node); + +static inline void *crypto_create_tfm(struct crypto_alg *alg, + const struct crypto_type *frontend) +{ + return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE); +} + struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask); -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask); + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node); + +static inline void *crypto_alloc_tfm(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask) +{ + return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE); +} int crypto_probing_notify(unsigned long val, void *v); diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 2b4d2b06ccbd..fcde59c65a81 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -106,6 +106,24 @@ struct acomp_alg { */ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, u32 mask); +/** + * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * compression algorithm e.g. "deflate" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * @node: specifies the NUMA node the ZIP hardware belongs to + * + * Allocate a handle for a compression algorithm. Drivers should try to use + * (de)compressors on the specified NUMA node. + * The returned struct crypto_acomp is the handle that is required for any + * subsequent API invocation for the compression operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
+ */ +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node); static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) { diff --git a/include/linux/crypto.h b/include/linux/crypto.h index bc5d2d4bfc3d..7cd2d00f0a05 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -594,6 +594,8 @@ int crypto_has_alg(const char *name, u32 type, u32 mask); struct crypto_tfm { u32 crt_flags; + + int node; void (*exit)(struct crypto_tfm *tfm); From 813ec3f1fe517acd533d44536195910edaaeebef Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sun, 5 Jul 2020 21:18:59 +1200 Subject: [PATCH 064/157] crypto: hisilicon/zip - permit users to specify NUMA node If users don't specify NUMA node, the driver will use the ZIP module near the CPU allocating acomp. Otherwise, it uses the ZIP module according to the requirement of users. Cc: Zhou Wang Signed-off-by: Barry Song Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip.h | 2 +- drivers/crypto/hisilicon/zip/zip_crypto.c | 6 +++--- drivers/crypto/hisilicon/zip/zip_main.c | 5 +++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index f3ed4c0e5493..4484be13812b 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -76,7 +76,7 @@ struct hisi_zip_sqe { u32 rsvd1[4]; }; -int zip_create_qps(struct hisi_qp **qps, int ctx_num); +int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node); int hisi_zip_register_to_crypto(void); void hisi_zip_unregister_from_crypto(void); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index c73707c2e539..01fd6a78111d 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -158,13 +158,13 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) hisi_qm_release_qp(ctx->qp); } -static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type) +static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node) { struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL }; struct hisi_zip *hisi_zip; int ret, i, j; - ret = zip_create_qps(qps, HZIP_CTX_Q_NUM); + ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node); if (ret) { pr_err("Can not create zip qps!\n"); return -ENODEV; @@ -379,7 +379,7 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm) struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); int ret; - ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name)); + ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node); if (ret) return ret; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 2229a21ae7c8..e2845b2c963d 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -234,9 +234,10 @@ static const struct pci_device_id hisi_zip_dev_ids[] = { }; MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); -int zip_create_qps(struct hisi_qp **qps, int qp_num) +int zip_create_qps(struct hisi_qp **qps, int qp_num, int node) { - int node = cpu_to_node(smp_processor_id()); + if (node == NUMA_NO_NODE) + node = cpu_to_node(smp_processor_id()); return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps); } From 37b6aab68fae17484173776ac8461613f96f6642 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 29 Jun 2020 09:39:21 +0200 Subject: [PATCH 065/157] crypto: arm64/ghash - drop PMULL based shash 
There are two ways to implement SIMD accelerated GCM on arm64: - using the PMULL instructions for carryless 64x64->128 multiplication, in which case the architecture guarantees that the AES instructions are available as well, and so we can use the AEAD implementation that combines both, - using the PMULL instructions for carryless 8x8->16 bit multiplication, which is implemented as a shash, and can be combined with any ctr(aes) implementation by the generic GCM AEAD template driver. So let's drop the 64x64->128 shash driver, which is never needed for GCM, and not suitable for use anywhere else. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/ghash-ce-glue.c | 90 +++++-------------------------- 1 file changed, 12 insertions(+), 78 deletions(-) diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 22831d3b7f62..be63d8b5152c 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -113,12 +113,8 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src, /* avoid hogging the CPU for too long */ #define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE) -static int __ghash_update(struct shash_desc *desc, const u8 *src, - unsigned int len, - void (*simd_update)(int blocks, u64 dg[], - const char *src, - struct ghash_key const *k, - const char *head)) +static int ghash_update(struct shash_desc *desc, const u8 *src, + unsigned int len) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; @@ -145,7 +141,7 @@ static int __ghash_update(struct shash_desc *desc, const u8 *src, ghash_do_update(chunk, ctx->digest, src, key, partial ? ctx->buf : NULL, - simd_update); + pmull_ghash_update_p8); blocks -= chunk; src += chunk * GHASH_BLOCK_SIZE; @@ -157,19 +153,7 @@ static int __ghash_update(struct shash_desc *desc, const u8 *src, return 0; } -static int ghash_update_p8(struct shash_desc *desc, const u8 *src, - unsigned int len) -{ - return __ghash_update(desc, src, len, pmull_ghash_update_p8); -} - -static int ghash_update_p64(struct shash_desc *desc, const u8 *src, - unsigned int len) -{ - return __ghash_update(desc, src, len, pmull_ghash_update_p64); -} - -static int ghash_final_p8(struct shash_desc *desc, u8 *dst) +static int ghash_final(struct shash_desc *desc, u8 *dst) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; @@ -189,26 +173,6 @@ static int ghash_final_p8(struct shash_desc *desc, u8 *dst) return 0; } -static int ghash_final_p64(struct shash_desc *desc, u8 *dst) -{ - struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); - unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; - - if (partial) { - struct ghash_key *key = crypto_shash_ctx(desc->tfm); - - memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); - - ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, - pmull_ghash_update_p64); - } - put_unaligned_be64(ctx->digest[1], dst); - put_unaligned_be64(ctx->digest[0], dst + 8); - - *ctx = (struct ghash_desc_ctx){}; - return 0; -} - static void ghash_reflect(u64 h[], const be128 *k) { u64 carry = be64_to_cpu(k->a) & BIT(63) ? 
1 : 0; @@ -254,7 +218,7 @@ static int ghash_setkey(struct crypto_shash *tfm, return __ghash_setkey(key, inkey, keylen); } -static struct shash_alg ghash_alg[] = {{ +static struct shash_alg ghash_alg = { .base.cra_name = "ghash", .base.cra_driver_name = "ghash-neon", .base.cra_priority = 150, @@ -264,25 +228,11 @@ static struct shash_alg ghash_alg[] = {{ .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, - .update = ghash_update_p8, - .final = ghash_final_p8, + .update = ghash_update, + .final = ghash_final, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), -}, { - .base.cra_name = "ghash", - .base.cra_driver_name = "ghash-ce", - .base.cra_priority = 200, - .base.cra_blocksize = GHASH_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ghash_key), - .base.cra_module = THIS_MODULE, - - .digestsize = GHASH_DIGEST_SIZE, - .init = ghash_init, - .update = ghash_update_p64, - .final = ghash_final_p64, - .setkey = ghash_setkey, - .descsize = sizeof(struct ghash_desc_ctx), -}}; +}; static int num_rounds(struct crypto_aes_ctx *ctx) { @@ -641,37 +591,21 @@ static struct aead_alg gcm_aes_alg = { static int __init ghash_ce_mod_init(void) { - int ret; - if (!cpu_have_named_feature(ASIMD)) return -ENODEV; if (cpu_have_named_feature(PMULL)) - ret = crypto_register_shashes(ghash_alg, - ARRAY_SIZE(ghash_alg)); - else - /* only register the first array element */ - ret = crypto_register_shash(ghash_alg); + return crypto_register_aead(&gcm_aes_alg); - if (ret) - return ret; - - if (cpu_have_named_feature(PMULL)) { - ret = crypto_register_aead(&gcm_aes_alg); - if (ret) - crypto_unregister_shashes(ghash_alg, - ARRAY_SIZE(ghash_alg)); - } - return ret; + return crypto_register_shash(&ghash_alg); } static void __exit ghash_ce_mod_exit(void) { if (cpu_have_named_feature(PMULL)) - crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg)); + crypto_unregister_aead(&gcm_aes_alg); else - crypto_unregister_shash(ghash_alg); - crypto_unregister_aead(&gcm_aes_alg); + crypto_unregister_shash(&ghash_alg); } static const struct cpu_feature ghash_cpu_feature[] = { From 94fe4501cd2a9989d1a134423e12435482c12120 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 29 Jun 2020 09:39:22 +0200 Subject: [PATCH 066/157] crypto: arm64/gcm - disentangle ghash and gcm setkey() routines The remaining ghash implementation does not support aggregation, and so there is no point in including the precomputed powers of H in the key struct. So move that into the GCM setkey routine, and get rid of the shared sub-routine entirely. 
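The arithmetic behind those powers of H, for reference: GHASH folds blocks serially as Y = (Y ^ Xi) * H in GF(2^128), and four steps expand to Y' = (Y ^ X1)*H^4 ^ X2*H^3 ^ X3*H^2 ^ X4*H, so with H^2..H^4 precomputed the four multiplications become independent of one another. A sketch in terms of the generic helpers from <crypto/gf128mul.h>; the ghash_agg4() wrapper itself is hypothetical, not kernel code:

    #include <crypto/gf128mul.h>

    /* h[0] = H, h[1] = H^2, h[2] = H^3, h[3] = H^4 */
    static void ghash_agg4(be128 *y, const be128 x[4], const be128 h[4])
    {
    	be128 t[4] = { *y, x[1], x[2], x[3] };
    	int i;

    	be128_xor(&t[0], &t[0], &x[0]);		/* Y ^ X1 */
    	for (i = 0; i < 4; i++)
    		gf128mul_lle(&t[i], &h[3 - i]);	/* times H^4, H^3, H^2, H */
    	*y = t[0];
    	for (i = 1; i < 4; i++)
    		be128_xor(y, y, &t[i]);		/* fold the partial products */
    }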
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/ghash-ce-glue.c | 47 +++++++++++++++---------------- 1 file changed, 22 insertions(+), 25 deletions(-) diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index be63d8b5152c..921fa69b5ded 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -184,29 +184,6 @@ static void ghash_reflect(u64 h[], const be128 *k) h[1] ^= 0xc200000000000000UL; } -static int __ghash_setkey(struct ghash_key *key, - const u8 *inkey, unsigned int keylen) -{ - be128 h; - - /* needed for the fallback */ - memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); - - ghash_reflect(key->h, &key->k); - - h = key->k; - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h2, &h); - - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h3, &h); - - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h4, &h); - - return 0; -} - static int ghash_setkey(struct crypto_shash *tfm, const u8 *inkey, unsigned int keylen) { @@ -215,7 +192,11 @@ static int ghash_setkey(struct crypto_shash *tfm, if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; - return __ghash_setkey(key, inkey, keylen); + /* needed for the fallback */ + memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); + + ghash_reflect(key->h, &key->k); + return 0; } static struct shash_alg ghash_alg = { @@ -251,6 +232,7 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey, { struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm); u8 key[GHASH_BLOCK_SIZE]; + be128 h; int ret; ret = aes_expandkey(&ctx->aes_key, inkey, keylen); @@ -259,7 +241,22 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey, aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){}); - return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128)); + /* needed for the fallback */ + memcpy(&ctx->ghash_key.k, key, GHASH_BLOCK_SIZE); + + ghash_reflect(ctx->ghash_key.h, &ctx->ghash_key.k); + + h = ctx->ghash_key.k; + gf128mul_lle(&h, &ctx->ghash_key.k); + ghash_reflect(ctx->ghash_key.h2, &h); + + gf128mul_lle(&h, &ctx->ghash_key.k); + ghash_reflect(ctx->ghash_key.h3, &h); + + gf128mul_lle(&h, &ctx->ghash_key.k); + ghash_reflect(ctx->ghash_key.h4, &h); + + return 0; } static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) From 17d0fb1febbc34839a1ed5af8622478691d870df Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 29 Jun 2020 09:39:23 +0200 Subject: [PATCH 067/157] crypto: arm64/gcm - use variably sized key struct Now that the ghash and gcm drivers are split, we no longer need to allocate a key struct for the former that carries powers of H that are only used by the latter. Also, take this opportunity to clean up the code a little bit. 
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/ghash-ce-glue.c | 49 +++++++++++++------------------ 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 921fa69b5ded..2ae95dcf648f 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -31,12 +31,8 @@ MODULE_ALIAS_CRYPTO("ghash"); #define GCM_IV_SIZE 12 struct ghash_key { - u64 h[2]; - u64 h2[2]; - u64 h3[2]; - u64 h4[2]; - be128 k; + u64 h[][2]; }; struct ghash_desc_ctx { @@ -51,22 +47,18 @@ struct gcm_aes_ctx { }; asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); + u64 const h[][2], const char *head); asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); + u64 const h[][2], const char *head); asmlinkage void pmull_gcm_encrypt(int bytes, u8 dst[], const u8 src[], - struct ghash_key const *k, u64 dg[], - u8 ctr[], u32 const rk[], int rounds, - u8 tag[]); + u64 const h[][2], u64 dg[], u8 ctr[], + u32 const rk[], int rounds, u8 tag[]); asmlinkage void pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[], - struct ghash_key const *k, u64 dg[], - u8 ctr[], u32 const rk[], int rounds, - u8 tag[]); + u64 const h[][2], u64 dg[], u8 ctr[], + u32 const rk[], int rounds, u8 tag[]); static int ghash_init(struct shash_desc *desc) { @@ -80,12 +72,12 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src, struct ghash_key *key, const char *head, void (*simd_update)(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, + u64 const h[][2], const char *head)) { if (likely(crypto_simd_usable() && simd_update)) { kernel_neon_begin(); - simd_update(blocks, dg, src, key, head); + simd_update(blocks, dg, src, key->h, head); kernel_neon_end(); } else { be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; @@ -195,7 +187,7 @@ static int ghash_setkey(struct crypto_shash *tfm, /* needed for the fallback */ memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); - ghash_reflect(key->h, &key->k); + ghash_reflect(key->h[0], &key->k); return 0; } @@ -204,7 +196,7 @@ static struct shash_alg ghash_alg = { .base.cra_driver_name = "ghash-neon", .base.cra_priority = 150, .base.cra_blocksize = GHASH_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ghash_key), + .base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]), .base.cra_module = THIS_MODULE, .digestsize = GHASH_DIGEST_SIZE, @@ -244,17 +236,17 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey, /* needed for the fallback */ memcpy(&ctx->ghash_key.k, key, GHASH_BLOCK_SIZE); - ghash_reflect(ctx->ghash_key.h, &ctx->ghash_key.k); + ghash_reflect(ctx->ghash_key.h[0], &ctx->ghash_key.k); h = ctx->ghash_key.k; gf128mul_lle(&h, &ctx->ghash_key.k); - ghash_reflect(ctx->ghash_key.h2, &h); + ghash_reflect(ctx->ghash_key.h[1], &h); gf128mul_lle(&h, &ctx->ghash_key.k); - ghash_reflect(ctx->ghash_key.h3, &h); + ghash_reflect(ctx->ghash_key.h[2], &h); gf128mul_lle(&h, &ctx->ghash_key.k); - ghash_reflect(ctx->ghash_key.h4, &h); + ghash_reflect(ctx->ghash_key.h[3], &h); return 0; } @@ -380,8 +372,8 @@ static int gcm_encrypt(struct aead_request *req) } kernel_neon_begin(); - pmull_gcm_encrypt(nbytes, dst, src, &ctx->ghash_key, dg, - iv, ctx->aes_key.key_enc, nrounds, + pmull_gcm_encrypt(nbytes, dst, src, ctx->ghash_key.h, + dg, iv, ctx->aes_key.key_enc, nrounds, tag); kernel_neon_end(); @@ -494,8 +486,8 
@@ static int gcm_decrypt(struct aead_request *req) } kernel_neon_begin(); - pmull_gcm_decrypt(nbytes, dst, src, &ctx->ghash_key, dg, - iv, ctx->aes_key.key_enc, nrounds, + pmull_gcm_decrypt(nbytes, dst, src, ctx->ghash_key.h, + dg, iv, ctx->aes_key.key_enc, nrounds, tag); kernel_neon_end(); @@ -582,7 +574,8 @@ static struct aead_alg gcm_aes_alg = { .base.cra_driver_name = "gcm-aes-ce", .base.cra_priority = 300, .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct gcm_aes_ctx), + .base.cra_ctxsize = sizeof(struct gcm_aes_ctx) + + 4 * sizeof(u64[2]), .base.cra_module = THIS_MODULE, }; From e4f874858cc16bdf99ca4aa678c94d6e448f8847 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 29 Jun 2020 09:39:24 +0200 Subject: [PATCH 068/157] crypto: arm64/gcm - use inline helper to suppress indirect calls Introduce an inline wrapper for ghash_do_update() that incorporates the indirect call to the asm routine that is passed as an argument, and keep the non-SIMD fallback code out of line. This ensures that all references to the function pointer are inlined where the address is taken, removing the need for any indirect calls to begin with. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/ghash-ce-glue.c | 85 +++++++++++++++++-------------- 1 file changed, 46 insertions(+), 39 deletions(-) diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 2ae95dcf648f..da1034867aaa 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -69,36 +69,43 @@ static int ghash_init(struct shash_desc *desc) } static void ghash_do_update(int blocks, u64 dg[], const char *src, - struct ghash_key *key, const char *head, - void (*simd_update)(int blocks, u64 dg[], - const char *src, - u64 const h[][2], - const char *head)) + struct ghash_key *key, const char *head) { - if (likely(crypto_simd_usable() && simd_update)) { + be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; + + do { + const u8 *in = src; + + if (head) { + in = head; + blocks++; + head = NULL; + } else { + src += GHASH_BLOCK_SIZE; + } + + crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE); + gf128mul_lle(&dst, &key->k); + } while (--blocks); + + dg[0] = be64_to_cpu(dst.b); + dg[1] = be64_to_cpu(dst.a); +} + +static __always_inline +void ghash_do_simd_update(int blocks, u64 dg[], const char *src, + struct ghash_key *key, const char *head, + void (*simd_update)(int blocks, u64 dg[], + const char *src, + u64 const h[][2], + const char *head)) +{ + if (likely(crypto_simd_usable())) { kernel_neon_begin(); simd_update(blocks, dg, src, key->h, head); kernel_neon_end(); } else { - be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; - - do { - const u8 *in = src; - - if (head) { - in = head; - blocks++; - head = NULL; - } else { - src += GHASH_BLOCK_SIZE; - } - - crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE); - gf128mul_lle(&dst, &key->k); - } while (--blocks); - - dg[0] = be64_to_cpu(dst.b); - dg[1] = be64_to_cpu(dst.a); + ghash_do_update(blocks, dg, src, key, head); } } @@ -131,9 +138,9 @@ static int ghash_update(struct shash_desc *desc, const u8 *src, do { int chunk = min(blocks, MAX_BLOCKS); - ghash_do_update(chunk, ctx->digest, src, key, - partial ? ctx->buf : NULL, - pmull_ghash_update_p8); + ghash_do_simd_update(chunk, ctx->digest, src, key, + partial ? 
ctx->buf : NULL, + pmull_ghash_update_p8); blocks -= chunk; src += chunk * GHASH_BLOCK_SIZE; @@ -155,8 +162,8 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); - ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, - pmull_ghash_update_p8); + ghash_do_simd_update(1, ctx->digest, ctx->buf, key, NULL, + pmull_ghash_update_p8); } put_unaligned_be64(ctx->digest[1], dst); put_unaligned_be64(ctx->digest[0], dst + 8); @@ -280,9 +287,9 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[], if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) { int blocks = count / GHASH_BLOCK_SIZE; - ghash_do_update(blocks, dg, src, &ctx->ghash_key, - *buf_count ? buf : NULL, - pmull_ghash_update_p64); + ghash_do_simd_update(blocks, dg, src, &ctx->ghash_key, + *buf_count ? buf : NULL, + pmull_ghash_update_p64); src += blocks * GHASH_BLOCK_SIZE; count %= GHASH_BLOCK_SIZE; @@ -326,8 +333,8 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[]) if (buf_count) { memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count); - ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL, - pmull_ghash_update_p64); + ghash_do_simd_update(1, dg, buf, &ctx->ghash_key, NULL, + pmull_ghash_update_p64); } } @@ -403,7 +410,7 @@ static int gcm_encrypt(struct aead_request *req) } while (--remaining > 0); ghash_do_update(blocks, dg, walk.dst.virt.addr, - &ctx->ghash_key, NULL, NULL); + &ctx->ghash_key, NULL); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); @@ -422,7 +429,7 @@ static int gcm_encrypt(struct aead_request *req) tag = (u8 *)&lengths; ghash_do_update(1, dg, tag, &ctx->ghash_key, - walk.nbytes ? buf : NULL, NULL); + walk.nbytes ? buf : NULL); if (walk.nbytes) err = skcipher_walk_done(&walk, 0); @@ -507,7 +514,7 @@ static int gcm_decrypt(struct aead_request *req) u8 *dst = walk.dst.virt.addr; ghash_do_update(blocks, dg, walk.src.virt.addr, - &ctx->ghash_key, NULL, NULL); + &ctx->ghash_key, NULL); do { aes_encrypt(&ctx->aes_key, buf, iv); @@ -530,7 +537,7 @@ static int gcm_decrypt(struct aead_request *req) tag = (u8 *)&lengths; ghash_do_update(1, dg, tag, &ctx->ghash_key, - walk.nbytes ? buf : NULL, NULL); + walk.nbytes ? buf : NULL); if (walk.nbytes) { aes_encrypt(&ctx->aes_key, buf, iv); From 3d2df84548ed88dc3344392d4e5afb8884d05360 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 29 Jun 2020 09:39:25 +0200 Subject: [PATCH 069/157] crypto: arm/ghash - use variably sized key struct Of the two versions of GHASH that the ARM driver implements, only one performs aggregation, and so the other one has no use for the powers of H to be precomputed, or space to be allocated for them in the key struct. So make the context size dependent on which version is being selected, and while at it, use a static key to carry this decision, and get rid of the function pointer. 
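For reference, here is the static-key pattern in isolation: a minimal sketch with invented names (work_fast(), work_slow(), have_fast_insn() are assumptions, not the driver's symbols). The key is resolved once at init time, after which every call site is a patched direct branch rather than an indirect call through a function pointer:

    #include <linux/init.h>
    #include <linux/jump_label.h>
    #include <linux/types.h>

    void work_fast(int n);		/* assumed to exist elsewhere */
    void work_slow(int n);
    bool have_fast_insn(void);

    static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_fast);

    void do_work(int n)
    {
    	if (static_branch_likely(&use_fast))
    		work_fast(n);	/* direct call on the patched path */
    	else
    		work_slow(n);
    }

    static int __init pick_impl(void)
    {
    	if (have_fast_insn())
    		static_branch_enable(&use_fast);
    	return 0;
    }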
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm/crypto/ghash-ce-glue.c | 51 ++++++++++++++++----------------- 1 file changed, 24 insertions(+), 27 deletions(-) diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index a00fd329255f..f13401f3e669 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -16,6 +16,7 @@ #include #include #include +#include #include MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions"); @@ -27,12 +28,8 @@ MODULE_ALIAS_CRYPTO("ghash"); #define GHASH_DIGEST_SIZE 16 struct ghash_key { - u64 h[2]; - u64 h2[2]; - u64 h3[2]; - u64 h4[2]; - be128 k; + u64 h[][2]; }; struct ghash_desc_ctx { @@ -46,16 +43,12 @@ struct ghash_async_ctx { }; asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); + u64 const h[][2], const char *head); asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); + u64 const h[][2], const char *head); -static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_p64); static int ghash_init(struct shash_desc *desc) { @@ -70,7 +63,10 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src, { if (likely(crypto_simd_usable())) { kernel_neon_begin(); - pmull_ghash_update(blocks, dg, src, key, head); + if (static_branch_likely(&use_p64)) + pmull_ghash_update_p64(blocks, dg, src, key->h, head); + else + pmull_ghash_update_p8(blocks, dg, src, key->h, head); kernel_neon_end(); } else { be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; @@ -161,25 +157,26 @@ static int ghash_setkey(struct crypto_shash *tfm, const u8 *inkey, unsigned int keylen) { struct ghash_key *key = crypto_shash_ctx(tfm); - be128 h; if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; /* needed for the fallback */ memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); - ghash_reflect(key->h, &key->k); + ghash_reflect(key->h[0], &key->k); - h = key->k; - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h2, &h); + if (static_branch_likely(&use_p64)) { + be128 h = key->k; - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h3, &h); + gf128mul_lle(&h, &key->k); + ghash_reflect(key->h[1], &h); - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h4, &h); + gf128mul_lle(&h, &key->k); + ghash_reflect(key->h[2], &h); + gf128mul_lle(&h, &key->k); + ghash_reflect(key->h[3], &h); + } return 0; } @@ -195,7 +192,7 @@ static struct shash_alg ghash_alg = { .base.cra_driver_name = "ghash-ce-sync", .base.cra_priority = 300 - 1, .base.cra_blocksize = GHASH_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ghash_key), + .base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]), .base.cra_module = THIS_MODULE, }; @@ -354,10 +351,10 @@ static int __init ghash_ce_mod_init(void) if (!(elf_hwcap & HWCAP_NEON)) return -ENODEV; - if (elf_hwcap2 & HWCAP2_PMULL) - pmull_ghash_update = pmull_ghash_update_p64; - else - pmull_ghash_update = pmull_ghash_update_p8; + if (elf_hwcap2 & HWCAP2_PMULL) { + ghash_alg.base.cra_ctxsize += 3 * sizeof(u64[2]); + static_branch_enable(&use_p64); + } err = crypto_register_shash(&ghash_alg); if (err) From eeedb618378f8a09779546a3eeac16b000447d62 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 16 Jul 2020 21:45:03 +1000 Subject: [PATCH 070/157] crypto: caam - Remove broken arc4 support MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit The arc4 algorithm requires storing state in the request context in order to allow more than one encrypt/decrypt operation. As this driver does not seem to do that, it means that using it for more than one operation is broken. Fixes: eaed71a44ad9 ("crypto: caam - add ecb(*) support") Link: https://lore.kernel.org/linux-crypto/CAMj1kXGvMe_A_iQ43Pmygg9xaAM-RLy=_M=v+eg--8xNmv9P+w@mail.gmail.com Link: https://lore.kernel.org/linux-crypto/20200702101947.682-1-ardb@kernel.org Signed-off-by: Herbert Xu Acked-by: Ard Biesheuvel Acked-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 29 ----------------------------- drivers/crypto/caam/compat.h | 1 - 2 files changed, 30 deletions(-) diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index b2f9882bc010..797bff9b9318 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -810,12 +810,6 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } -static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher, - const u8 *key, unsigned int keylen) -{ - return skcipher_setkey(skcipher, key, keylen, 0); -} - static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { @@ -1967,21 +1961,6 @@ static struct caam_skcipher_alg driver_algs[] = { }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB, }, - { - .skcipher = { - .base = { - .cra_name = "ecb(arc4)", - .cra_driver_name = "ecb-arc4-caam", - .cra_blocksize = ARC4_BLOCK_SIZE, - }, - .setkey = arc4_skcipher_setkey, - .encrypt = skcipher_encrypt, - .decrypt = skcipher_decrypt, - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - }, - .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB, - }, }; static struct caam_aead_alg driver_aeads[] = { @@ -3457,7 +3436,6 @@ int caam_algapi_init(struct device *ctrldev) struct caam_drv_private *priv = dev_get_drvdata(ctrldev); int i = 0, err = 0; u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; - u32 arc4_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false, gcm_support; @@ -3477,8 +3455,6 @@ int caam_algapi_init(struct device *ctrldev) CHA_ID_LS_DES_SHIFT; aes_inst = cha_inst & CHA_ID_LS_AES_MASK; md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; - arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >> - CHA_ID_LS_ARC4_SHIFT; ccha_inst = 0; ptha_inst = 0; @@ -3499,7 +3475,6 @@ int caam_algapi_init(struct device *ctrldev) md_inst = mdha & CHA_VER_NUM_MASK; ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK; ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK; - arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK; gcm_support = aesa & CHA_VER_MISC_AES_GCM; } @@ -3522,10 +3497,6 @@ int caam_algapi_init(struct device *ctrldev) if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; - /* Skip ARC4 algorithms if not supported by device */ - if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4) - continue; - /* * Check support for AES modes not available * on LP devices. 
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 60e2a54c19f1..c3c22a8de4c0 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include From 7dc95d0ea32e75c8b2cb883642f33353939c9bac Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Tue, 7 Jul 2020 09:15:37 +0800 Subject: [PATCH 071/157] crypto: hisilicon/sec2 - clear SEC debug regs The SEC debug registers aren't cleared even when the driver is removed, so add a clearing operation to driver removal. Signed-off-by: Kai Ye Reviewed-by: Longfang Liu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 57de51fa6f51..d5f0589a1b0d 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -346,10 +346,17 @@ static int sec_set_user_domain_and_cache(struct hisi_qm *qm) /* sec_debug_regs_clear() - clear the sec debug regs */ static void sec_debug_regs_clear(struct hisi_qm *qm) { + int i; + /* clear current_qm */ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + /* clear sec dfx regs */ + writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); + for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) + readl(qm->io_base + sec_dfx_regs[i].offset); + /* clear rdclr_en */ writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE); From 9597efc3aed1ba8ad632e4987b1ee08af7ca943f Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Tue, 7 Jul 2020 09:15:38 +0800 Subject: [PATCH 072/157] crypto: hisilicon/sec2 - update busy processing logic Previously, if a SEC queue was in the 'fake busy' state, a request carrying a 'fake busy' flag was still sent into the hardware and the sending function returned busy. After the request finished, the SEC driver's callback identified the 'fake busy' flag and notified the user, by calling the user's callback function, that the hardware was no longer busy. Now, a request sent to busy hardware is cached in the SEC queue's backlog and '-EBUSY' is returned to the user. After the request finishes, the cached requests are processed in the callback function to notify the corresponding user that the SEC queue can process more requests.
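The caller-visible contract adopted here is the standard crypto API backlog protocol: with CRYPTO_TFM_REQ_MAY_BACKLOG set, a full queue keeps the request and returns -EBUSY, the driver later completes it with -EINPROGRESS once it leaves the backlog (which is what the callback changes below do), and the real status arrives in a second completion. A hedged sketch of a synchronous caller using the stock crypto_wait_req() helpers (the encrypt_sync() wrapper itself is hypothetical):

    #include <linux/crypto.h>
    #include <crypto/skcipher.h>

    static int encrypt_sync(struct skcipher_request *req)
    {
    	DECLARE_CRYPTO_WAIT(wait);

    	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
    				      crypto_req_done, &wait);
    	/* crypto_wait_req() sleeps on both -EINPROGRESS and the
    	 * backlogged -EBUSY case and returns the final status.
    	 */
    	return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
    }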
Signed-off-by: Kai Ye Reviewed-by: Longfang Liu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec.h | 4 + drivers/crypto/hisilicon/sec2/sec_crypto.c | 91 ++++++++++++++++------ drivers/crypto/hisilicon/sec2/sec_main.c | 1 + 3 files changed, 71 insertions(+), 25 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 7b64aca704d6..037762b531e2 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -46,9 +46,11 @@ struct sec_req { struct sec_cipher_req c_req; struct sec_aead_req aead_req; + struct list_head backlog_head; int err_type; int req_id; + int flag; /* Status of the SEC request */ bool fake_busy; @@ -104,6 +106,7 @@ struct sec_qp_ctx { struct sec_alg_res res[QM_Q_DEPTH]; struct sec_ctx *ctx; struct mutex req_lock; + struct list_head backlog; struct hisi_acc_sgl_pool *c_in_pool; struct hisi_acc_sgl_pool *c_out_pool; atomic_t pending_reqs; @@ -161,6 +164,7 @@ struct sec_dfx { atomic64_t send_cnt; atomic64_t recv_cnt; atomic64_t send_busy_cnt; + atomic64_t recv_busy_cnt; atomic64_t err_bd_cnt; atomic64_t invalid_req_cnt; atomic64_t done_flag_cnt; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 64614a9bdf21..bfb9ce1359f3 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -166,6 +166,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; if (unlikely(!req)) { atomic64_inc(&dfx->invalid_req_cnt); + atomic_inc(&qp->qp_status.used); return; } req->err_type = bd->type2.error_type; @@ -198,21 +199,30 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) struct sec_qp_ctx *qp_ctx = req->qp_ctx; int ret; + if (ctx->fake_req_limit <= + atomic_read(&qp_ctx->qp->qp_status.used) && + !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + mutex_lock(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); + + if (ctx->fake_req_limit <= + atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { + list_add_tail(&req->backlog_head, &qp_ctx->backlog); + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); + atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); + mutex_unlock(&qp_ctx->req_lock); + return -EBUSY; + } mutex_unlock(&qp_ctx->req_lock); - atomic64_inc(&ctx->sec->debug.dfx.send_cnt); if (unlikely(ret == -EBUSY)) return -ENOBUFS; - if (!ret) { - if (req->fake_busy) { - atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); - ret = -EBUSY; - } else { - ret = -EINPROGRESS; - } + if (likely(!ret)) { + ret = -EINPROGRESS; + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); } return ret; @@ -373,8 +383,8 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, qp_ctx->ctx = ctx; mutex_init(&qp_ctx->req_lock); - atomic_set(&qp_ctx->pending_reqs, 0); idr_init(&qp_ctx->req_idr); + INIT_LIST_HEAD(&qp_ctx->backlog); qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, SEC_SGL_SGE_NR); @@ -1048,21 +1058,49 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); } +static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct sec_req *backlog_req = NULL; + + mutex_lock(&qp_ctx->req_lock); + if (ctx->fake_req_limit >= + atomic_read(&qp_ctx->qp->qp_status.used) && + !list_empty(&qp_ctx->backlog)) { + backlog_req = list_first_entry(&qp_ctx->backlog, + typeof(*backlog_req), backlog_head); + 
list_del(&backlog_req->backlog_head); + } + mutex_unlock(&qp_ctx->req_lock); + + return backlog_req; +} + static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, int err) { struct skcipher_request *sk_req = req->c_req.sk_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct skcipher_request *backlog_sk_req; + struct sec_req *backlog_req; - atomic_dec(&qp_ctx->pending_reqs); sec_free_req_id(req); /* IV output at encrypto of CBC mode */ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) sec_update_iv(req, SEC_SKCIPHER); - if (req->fake_busy) - sk_req->base.complete(&sk_req->base, -EINPROGRESS); + while (1) { + backlog_req = sec_back_req_clear(ctx, qp_ctx); + if (!backlog_req) + break; + + backlog_sk_req = backlog_req->c_req.sk_req; + backlog_sk_req->base.complete(&backlog_sk_req->base, + -EINPROGRESS); + atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); + } + sk_req->base.complete(&sk_req->base, err); } @@ -1133,10 +1171,10 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) struct sec_cipher_req *c_req = &req->c_req; size_t authsize = crypto_aead_authsize(tfm); struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct aead_request *backlog_aead_req; + struct sec_req *backlog_req; size_t sz; - atomic_dec(&qp_ctx->pending_reqs); - if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) sec_update_iv(req, SEC_AEAD); @@ -1157,17 +1195,22 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) sec_free_req_id(req); - if (req->fake_busy) - a_req->base.complete(&a_req->base, -EINPROGRESS); + while (1) { + backlog_req = sec_back_req_clear(c, qp_ctx); + if (!backlog_req) + break; + + backlog_aead_req = backlog_req->aead_req.aead_req; + backlog_aead_req->base.complete(&backlog_aead_req->base, + -EINPROGRESS); + atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); + } a_req->base.complete(&a_req->base, err); } static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) { - struct sec_qp_ctx *qp_ctx = req->qp_ctx; - - atomic_dec(&qp_ctx->pending_reqs); sec_free_req_id(req); sec_free_queue_id(ctx, req); } @@ -1187,11 +1230,6 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) return req->req_id; } - if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs)) - req->fake_busy = true; - else - req->fake_busy = false; - return 0; } @@ -1213,7 +1251,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) sec_update_iv(req, ctx->alg_type); ret = ctx->req_op->bd_send(ctx, req); - if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) { + if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || + (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n"); goto err_send_req; } @@ -1407,6 +1446,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) if (!sk_req->cryptlen) return 0; + req->flag = sk_req->base.flags; req->c_req.sk_req = sk_req; req->c_req.encrypt = encrypt; req->ctx = ctx; @@ -1530,6 +1570,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) struct sec_ctx *ctx = crypto_aead_ctx(tfm); int ret; + req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index d5f0589a1b0d..109e7400f898 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ 
-122,6 +122,7 @@ static struct sec_dfx_item sec_dfx_labels[] = { {"send_cnt", offsetof(struct sec_dfx, send_cnt)}, {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)}, {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)}, + {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)}, {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)}, {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)}, {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)}, From d0228aeb4d65c6550eac6e7c3a91520d2ceedf4f Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Tue, 7 Jul 2020 09:15:39 +0800 Subject: [PATCH 073/157] crypto: hisilicon/sec2 - update SEC initialization and reset Updates the initialization and reset of SEC driver's register operation. Signed-off-by: Longfang Liu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.h | 1 + drivers/crypto/hisilicon/sec2/sec_main.c | 55 ++++++++++++------------ 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 0a351de8d838..6c1d3c7d64ee 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -44,6 +44,7 @@ #define QM_AXI_M_CFG 0x1000ac #define AXI_M_CFG 0xffff #define QM_AXI_M_CFG_ENABLE 0x1000b0 +#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014 #define AXI_M_CFG_ENABLE 0xffffffff #define QM_PEH_AXUSER_CFG 0x1000cc #define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 109e7400f898..0c129878dce7 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -22,11 +22,9 @@ #define SEC_PF_PCI_DEVICE_ID 0xa255 #define SEC_VF_PCI_DEVICE_ID 0xa256 -#define SEC_XTS_MIV_ENABLE_REG 0x301384 -#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF -#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF -#define SEC_BD_ERR_CHK_EN1 0xfffff7fd -#define SEC_BD_ERR_CHK_EN2 0xffffbfff +#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF +#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd +#define SEC_BD_ERR_CHK_EN3 0xffffbfff #define SEC_SQE_SIZE 128 #define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) @@ -47,17 +45,18 @@ #define SEC_ECC_ADDR(err) ((err) >> 0) #define SEC_CORE_INT_DISABLE 0x0 #define SEC_CORE_INT_ENABLE 0x1ff +#define SEC_CORE_INT_CLEAR 0x1ff +#define SEC_SAA_ENABLE 0x17f -#define SEC_RAS_CE_REG 0x50 -#define SEC_RAS_FE_REG 0x54 -#define SEC_RAS_NFE_REG 0x58 +#define SEC_RAS_CE_REG 0x301050 +#define SEC_RAS_FE_REG 0x301054 +#define SEC_RAS_NFE_REG 0x301058 #define SEC_RAS_CE_ENB_MSK 0x88 #define SEC_RAS_FE_ENB_MSK 0x0 #define SEC_RAS_NFE_ENB_MSK 0x177 #define SEC_RAS_DISABLE 0x0 #define SEC_MEM_START_INIT_REG 0x0100 #define SEC_MEM_INIT_DONE_REG 0x0104 -#define SEC_QM_ABNORMAL_INT_MASK 0x100004 #define SEC_CONTROL_REG 0x0200 #define SEC_TRNG_EN_SHIFT 8 @@ -68,8 +67,10 @@ #define SEC_INTERFACE_USER_CTRL0_REG 0x0220 #define SEC_INTERFACE_USER_CTRL1_REG 0x0224 +#define SEC_SAA_EN_REG 0x0270 +#define SEC_BD_ERR_CHK_EN_REG0 0x0380 #define SEC_BD_ERR_CHK_EN_REG1 0x0384 -#define SEC_BD_ERR_CHK_EN_REG2 0x038c +#define SEC_BD_ERR_CHK_EN_REG3 0x038c #define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) @@ -77,8 +78,8 @@ #define SEC_DELAY_10_US 10 #define SEC_POLL_TIMEOUT_US 1000 -#define SEC_VF_CNT_MASK 0xffffffc0 #define SEC_DBGFS_VAL_MAX_LEN 20 +#define SEC_SINGLE_PORT_MAX_TRANS 0x2060 #define SEC_SQE_MASK_OFFSET 64 #define SEC_SQE_MASK_LEN 48 @@ -297,25 +298,25 @@ static int sec_engine_init(struct hisi_qm *qm) reg 
|= SEC_USER1_SMMU_NORMAL; writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + writel(SEC_SINGLE_PORT_MAX_TRANS, + qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); + + writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG)); + + /* Enable sm4 extra mode, as ctr/ecb */ + writel_relaxed(SEC_BD_ERR_CHK_EN0, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0)); + /* Enable sm4 xts mode multiple iv */ writel_relaxed(SEC_BD_ERR_CHK_EN1, SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1)); - writel_relaxed(SEC_BD_ERR_CHK_EN2, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2)); - - /* enable clock gate control */ - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); - reg |= SEC_CLK_GATE_ENABLE; - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(SEC_BD_ERR_CHK_EN3, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3)); /* config endian */ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); reg |= sec_get_endian(qm); writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); - /* Enable sm4 xts mode multiple iv */ - writel_relaxed(SEC_XTS_MIV_ENABLE_MSK, - qm->io_base + SEC_XTS_MIV_ENABLE_REG); - return 0; } @@ -374,10 +375,10 @@ static void sec_hw_error_enable(struct hisi_qm *qm) return; } - val = readl(qm->io_base + SEC_CONTROL_REG); + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); /* clear SEC hw error source if having */ - writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE); + writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); /* enable SEC hw error interrupts */ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); @@ -390,14 +391,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm) /* enable SEC block master OOO when m-bit error occur */ val = val | SEC_AXI_SHUTDOWN_ENABLE; - writel(val, qm->io_base + SEC_CONTROL_REG); + writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); } static void sec_hw_error_disable(struct hisi_qm *qm) { u32 val; - val = readl(qm->io_base + SEC_CONTROL_REG); + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); /* disable RAS int */ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); @@ -410,7 +411,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm) /* disable SEC block master OOO when m-bit error occur */ val = val & SEC_AXI_SHUTDOWN_DISABLE; - writel(val, qm->io_base + SEC_CONTROL_REG); + writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); } static u32 sec_current_qm_read(struct sec_debug_file *file) From 6b534f7aaf0cf34658e0cdcd561514a174c4f9d2 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Tue, 7 Jul 2020 09:15:40 +0800 Subject: [PATCH 074/157] crypto: hisilicon/sec2 - update debugfs interface parameters Update debugfs interface parameters, and adjust the processing logic inside the corresponding function Signed-off-by: Longfang Liu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 52 ++++++++++++------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 0c129878dce7..301f042feffc 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -586,16 +586,16 @@ static int sec_debugfs_atomic64_set(void *data, u64 val) DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get, sec_debugfs_atomic64_set, "%lld\n"); -static int sec_core_debug_init(struct sec_dev *sec) +static int sec_core_debug_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; + struct sec_dev *sec = container_of(qm, struct sec_dev, qm); struct device *dev = &qm->pdev->dev; struct sec_dfx *dfx = &sec->debug.dfx; struct 
debugfs_regset32 *regset; struct dentry *tmp_d; int i; - tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root); + tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root); regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) @@ -618,44 +618,44 @@ static int sec_core_debug_init(struct sec_dev *sec) return 0; } -static int sec_debug_init(struct sec_dev *sec) +static int sec_debug_init(struct hisi_qm *qm) { + struct sec_dev *sec = container_of(qm, struct sec_dev, qm); int i; - for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) { - spin_lock_init(&sec->debug.files[i].lock); - sec->debug.files[i].index = i; - sec->debug.files[i].qm = &sec->qm; + if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { + for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) { + spin_lock_init(&sec->debug.files[i].lock); + sec->debug.files[i].index = i; + sec->debug.files[i].qm = qm; - debugfs_create_file(sec_dbg_file_name[i], 0600, - sec->qm.debug.debug_root, - sec->debug.files + i, - &sec_dbg_fops); + debugfs_create_file(sec_dbg_file_name[i], 0600, + qm->debug.debug_root, + sec->debug.files + i, + &sec_dbg_fops); + } } - return sec_core_debug_init(sec); + return sec_core_debug_init(qm); } -static int sec_debugfs_init(struct sec_dev *sec) +static int sec_debugfs_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; struct device *dev = &qm->pdev->dev; int ret; qm->debug.debug_root = debugfs_create_dir(dev_name(dev), sec_debugfs_root); - qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; ret = hisi_qm_debug_init(qm); if (ret) goto failed_to_create; - if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { - ret = sec_debug_init(sec); - if (ret) - goto failed_to_create; - } + ret = sec_debug_init(qm); + if (ret) + goto failed_to_create; + return 0; @@ -665,9 +665,9 @@ failed_to_create: return ret; } -static void sec_debugfs_exit(struct sec_dev *sec) +static void sec_debugfs_exit(struct hisi_qm *qm) { - debugfs_remove_recursive(sec->qm.debug.debug_root); + debugfs_remove_recursive(qm->debug.debug_root); } static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) @@ -877,7 +877,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_probe_uninit; } - ret = sec_debugfs_init(sec); + ret = sec_debugfs_init(qm); if (ret) pci_warn(pdev, "Failed to init debugfs!\n"); @@ -902,7 +902,7 @@ err_crypto_unregister: err_remove_from_list: hisi_qm_del_from_list(qm, &sec_devices); - sec_debugfs_exit(sec); + sec_debugfs_exit(qm); hisi_qm_stop(qm); err_probe_uninit: @@ -926,7 +926,7 @@ static void sec_remove(struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev); - sec_debugfs_exit(sec); + sec_debugfs_exit(qm); (void)hisi_qm_stop(qm); From 38c3b74edf23dd2f175bc7801962acb3b0985201 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Tue, 7 Jul 2020 09:15:41 +0800 Subject: [PATCH 075/157] crypto: hisilicon/sec2 - fix some coding styles Modify some log output interfaces and update author information Signed-off-by: Longfang Liu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 301f042feffc..2297425486cb 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -282,7 +282,7 @@ static int sec_engine_init(struct hisi_qm *qm) reg, reg & 0x1, SEC_DELAY_10_US, 
SEC_POLL_TIMEOUT_US); if (ret) { - dev_err(&qm->pdev->dev, "fail to init sec mem\n"); + pci_err(qm->pdev, "fail to init sec mem\n"); return ret; } @@ -371,7 +371,7 @@ static void sec_hw_error_enable(struct hisi_qm *qm) if (qm->ver == QM_HW_V1) { writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); - dev_info(&qm->pdev->dev, "V1 not support hw error handle\n"); + pci_info(qm->pdev, "V1 not support hw error handle\n"); return; } @@ -599,7 +599,7 @@ static int sec_core_debug_init(struct hisi_qm *qm) regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) - return -ENOENT; + return -ENOMEM; regset->regs = sec_dfx_regs; regset->nregs = ARRAY_SIZE(sec_dfx_regs); @@ -686,8 +686,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) SEC_CORE_SRAM_ECC_ERR_INFO); dev_err(dev, "multi ecc sram num=0x%x\n", SEC_ECC_NUM(err_val)); - dev_err(dev, "multi ecc sram addr=0x%x\n", - SEC_ECC_ADDR(err_val)); } } errs++; @@ -996,5 +994,6 @@ module_exit(sec_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zaibo Xu "); MODULE_AUTHOR("Longfang Liu "); +MODULE_AUTHOR("Kai Ye "); MODULE_AUTHOR("Wei Zhang "); MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator"); From 271dead302d3af20bdb917bffdb8d318e60c23d7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 7 Jul 2020 13:47:13 +1000 Subject: [PATCH 076/157] hwrng: ba431 - Include kernel.h There are multiple things in this file that requires kernel.h but it's only included through other header files indirectly. This patch adds a direct inclusion as those indirect inclusions may go away at any point. Signed-off-by: Herbert Xu --- drivers/char/hw_random/ba431-rng.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c index a39e3abf50b9..410b50b05e21 100644 --- a/drivers/char/hw_random/ba431-rng.c +++ b/drivers/char/hw_random/ba431-rng.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include From 3f368b886e95080daf811d228795c28716495279 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:31:51 +0300 Subject: [PATCH 077/157] crypto: amlogic-gxl - default to build as module The AmLogic GXL crypto accelerator driver is built into the kernel if ARCH_MESON is set. However, given the single image policy of arm64, its defconfig enables all platforms by default, and so ARCH_MESON is usually enabled. This means that the AmLogic driver causes the arm64 defconfig build to pull in a huge chunk of the crypto stack as a builtin as well, which is undesirable, so let's make the amlogic GXL driver default to 'm' instead. 
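
For readers less versed in Kconfig, the tristate semantics behind this one-line change: with "default m", a defconfig that leaves the option untouched gets the driver as a loadable module, and the symbols it selects (CRYPTO_SKCIPHER, CRYPTO_ENGINE, ...) are only forced to at least 'm' rather than built in. A minimal sketch of the idiom, with illustrative symbol names (CRYPTO_DEV_EXAMPLE, ARCH_FOO) rather than the real entry:

config CRYPTO_DEV_EXAMPLE
	tristate "Example crypto offloader"
	depends on HAS_IOMEM
	default m if ARCH_FOO	# module by default on FOO platforms
	select CRYPTO_SKCIPHER	# selected symbols follow at 'm', not 'y'
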
Signed-off-by: Ard Biesheuvel Tested-by: Corentin Labbe Signed-off-by: Herbert Xu --- drivers/crypto/amlogic/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig index cf9547602670..cf2c676a7093 100644 --- a/drivers/crypto/amlogic/Kconfig +++ b/drivers/crypto/amlogic/Kconfig @@ -1,7 +1,7 @@ config CRYPTO_DEV_AMLOGIC_GXL tristate "Support for amlogic cryptographic offloader" depends on HAS_IOMEM - default y if ARCH_MESON + default m if ARCH_MESON select CRYPTO_SKCIPHER select CRYPTO_ENGINE select CRYPTO_ECB From 1d63e4557f0d11fd795c7acd50a241da2d8bf89f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:31:52 +0300 Subject: [PATCH 078/157] crypto: amlogic-gxl - permit async skcipher as fallback Even though the amlogic-gxl driver implements asynchronous versions of ecb(aes) and cbc(aes), the fallbacks it allocates are required to be synchronous. Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. Signed-off-by: Ard Biesheuvel Tested-by: Corentin Labbe Signed-off-by: Herbert Xu --- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 27 ++++++++++----------- drivers/crypto/amlogic/amlogic-gxl.h | 3 ++- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index 9819dd50fbad..5880b94dcb32 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -64,22 +64,20 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq) #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct meson_alg_template *algt; -#endif - SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm); -#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG algt = container_of(alg, struct meson_alg_template, alg.skcipher); algt->stat_fb++; #endif - skcipher_request_set_sync_tfm(req, op->fallback_tfm); - skcipher_request_set_callback(req, areq->base.flags, NULL, NULL); - skcipher_request_set_crypt(req, areq->src, areq->dst, + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); + if (rctx->op_dir == MESON_DECRYPT) - err = crypto_skcipher_decrypt(req); + err = crypto_skcipher_decrypt(&rctx->fallback_req); else - err = crypto_skcipher_encrypt(req); - skcipher_request_zero(req); + err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } @@ -321,15 +319,16 @@ int meson_cipher_init(struct crypto_tfm *tfm) algt = container_of(alg, struct meson_alg_template, alg.skcipher); op->mc = algt->mc; - sktfm->reqsize = sizeof(struct 
meson_cipher_req_ctx); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + op->enginectx.op.do_one_request = meson_handle_cipher_request; op->enginectx.op.prepare_request = NULL; op->enginectx.op.unprepare_request = NULL; @@ -345,7 +344,7 @@ void meson_cipher_exit(struct crypto_tfm *tfm) memzero_explicit(op->key, op->keylen); kfree(op->key); } - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); } int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, @@ -377,5 +376,5 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h index b7f2de91ab76..dc0f142324a3 100644 --- a/drivers/crypto/amlogic/amlogic-gxl.h +++ b/drivers/crypto/amlogic/amlogic-gxl.h @@ -109,6 +109,7 @@ struct meson_dev { struct meson_cipher_req_ctx { u32 op_dir; int flow; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -126,7 +127,7 @@ struct meson_cipher_tfm_ctx { u32 keylen; u32 keymode; struct meson_dev *mc; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; /* From 6a99d7a2d73cbeaa0b93551fc8ec887295821bbe Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:31:53 +0300 Subject: [PATCH 079/157] crypto: omap-aes - permit asynchronous skcipher as fallback Even though the omap-aes driver implements asynchronous versions of ecb(aes), cbc(aes) and ctr(aes), the fallbacks it allocates are required to be synchronous. Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. 
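
Patches 078 through 087 all apply this same conversion, so the idiom is worth sketching once: embed a struct skcipher_request at the end of the driver's request context, reserve space for the fallback's private context via crypto_skcipher_reqsize(), and forward the original request's completion callback so the fallback is allowed to finish asynchronously. A condensed, self-contained sketch with hypothetical drv_* names — not code from any one of these drivers:

#include <linux/err.h>
#include <crypto/internal/skcipher.h>

struct drv_tfm_ctx {
	struct crypto_skcipher *fallback;
	/* ... hardware state ... */
};

struct drv_req_ctx {
	/* ... per-request driver state ... */
	struct skcipher_request fallback_req;	/* must remain the last member */
};

static int drv_init_tfm(struct crypto_skcipher *tfm)
{
	struct drv_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* an ordinary (possibly asynchronous) skcipher, not a sync-only one */
	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* reserve room for our context plus the fallback's private context */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct drv_req_ctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static int drv_do_fallback(struct skcipher_request *req, bool encrypt)
{
	struct drv_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct drv_req_ctx *rctx = skcipher_request_ctx(req);

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	/* hand over the caller's completion: the fallback may finish async */
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
			 crypto_skcipher_decrypt(&rctx->fallback_req);
}

The "keep at the end" comments in the diffs are load-bearing: the extra bytes reserved for the fallback's own request context live directly behind the embedded skcipher_request, so any member placed after fallback_req would overlap them.
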
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 35 +++++++++++++++++------------------ drivers/crypto/omap-aes.h | 3 ++- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index b5aff20c5900..25154b74dcc6 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -548,20 +548,18 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode) !!(mode & FLAGS_CBC)); if (req->cryptlen < aes_fallback_sz) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, NULL, - NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); if (mode & FLAGS_ENCRYPT) - ret = crypto_skcipher_encrypt(subreq); + ret = crypto_skcipher_encrypt(&rctx->fallback_req); else - ret = crypto_skcipher_decrypt(subreq); - - skcipher_request_zero(subreq); + ret = crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } dd = omap_aes_find_dev(rctx); @@ -590,11 +588,11 @@ static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, memcpy(ctx->key, key, keylen); ctx->keylen = keylen; - crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & + crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); + ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); if (!ret) return 0; @@ -640,15 +638,16 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm) { const char *name = crypto_tfm_alg_name(&tfm->base); struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *blk; + struct crypto_skcipher *blk; - blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(blk)) return PTR_ERR(blk); ctx->fallback = blk; - crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) + + crypto_skcipher_reqsize(blk)); ctx->enginectx.op.prepare_request = omap_aes_prepare_req; ctx->enginectx.op.unprepare_request = NULL; @@ -662,7 +661,7 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm) struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm); if (ctx->fallback) - crypto_free_sync_skcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); ctx->fallback = NULL; } diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h index 2d111bf906e1..23d073e87bb8 100644 --- a/drivers/crypto/omap-aes.h +++ b/drivers/crypto/omap-aes.h @@ -97,7 +97,7 @@ struct omap_aes_ctx { int keylen; u32 key[AES_KEYSIZE_256 / sizeof(u32)]; u8 nonce[4]; - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; }; struct omap_aes_gcm_ctx { @@ -110,6 +110,7 @@ struct omap_aes_reqctx { unsigned long mode; u8 iv[AES_BLOCK_SIZE]; u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)]; + struct skcipher_request fallback_req; // keep at the end }; #define 
OMAP_AES_QUEUE_LENGTH 1 From 89fb00f245681d0c951cd889d3d165cf9959d70c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:31:54 +0300 Subject: [PATCH 080/157] crypto: sun4i - permit asynchronous skcipher as fallback Even though the sun4i driver implements asynchronous versions of ecb(aes) and cbc(aes), the fallbacks it allocates are required to be synchronous. Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. Signed-off-by: Ard Biesheuvel Tested-by: Corentin Labbe Signed-off-by: Herbert Xu --- .../allwinner/sun4i-ss/sun4i-ss-cipher.c | 46 +++++++++---------- drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h | 3 +- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c index 7f22d305178e..b72de8939497 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c @@ -122,19 +122,17 @@ static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_requ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); int err; - skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); - skcipher_request_set_callback(subreq, areq->base.flags, NULL, - NULL); - skcipher_request_set_crypt(subreq, areq->src, areq->dst, + skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (ctx->mode & SS_DECRYPTION) - err = crypto_skcipher_decrypt(subreq); + err = crypto_skcipher_decrypt(&ctx->fallback_req); else - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = crypto_skcipher_encrypt(&ctx->fallback_req); return err; } @@ -494,23 +492,25 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm) alg.crypto.base); op->ss = algt->ss; - crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), - sizeof(struct sun4i_cipher_req_ctx)); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct sun4i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm)); + + err = pm_runtime_get_sync(op->ss->dev); if (err < 0) goto error_pm; return 0; error_pm: - 
crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); return err; } @@ -518,7 +518,7 @@ void sun4i_ss_cipher_exit(struct crypto_tfm *tfm) { struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); pm_runtime_put(op->ss->dev); } @@ -546,10 +546,10 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, op->keylen = keylen; memcpy(op->key, key, keylen); - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } /* check and set the DES key, prepare the mode to be used */ @@ -566,10 +566,10 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, op->keylen = keylen; memcpy(op->key, key, keylen); - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } /* check and set the 3DES key, prepare the mode to be used */ @@ -586,9 +586,9 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, op->keylen = keylen; memcpy(op->key, key, keylen); - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h index 2b4c6333eb67..163962f9e284 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h @@ -170,11 +170,12 @@ struct sun4i_tfm_ctx { u32 keylen; u32 keymode; struct sun4i_ss_ctx *ss; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; struct sun4i_cipher_req_ctx { u32 mode; + struct skcipher_request fallback_req; // keep at the end }; struct sun4i_req_ctx { From 31abd3eb3df6bd520fece598e31047a0f377953e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:31:55 +0300 Subject: [PATCH 081/157] crypto: sun8i-ce - permit asynchronous skcipher as fallback Even though the sun8i-ce driver implements asynchronous versions of ecb(aes) and cbc(aes), the fallbacks it allocates are required to be synchronous. 
Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. Signed-off-by: Ard Biesheuvel Acked-by: Corentin Labbe Tested-by: Corentin Labbe Signed-off-by: Herbert Xu --- .../allwinner/sun8i-ce/sun8i-ce-cipher.c | 41 +++++++++---------- drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 8 ++-- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 3665a0a2038f..1e4f9a58bb24 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -58,23 +58,20 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq) #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ce_alg_template *algt; -#endif - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); algt->stat_fb++; #endif - skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); - skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, areq->src, areq->dst, + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (rctx->op_dir & CE_DECRYPTION) - err = crypto_skcipher_decrypt(subreq); + err = crypto_skcipher_decrypt(&rctx->fallback_req); else - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } @@ -335,18 +332,20 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); op->ce = algt->ce; - sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + + dev_info(op->ce->dev, "Fallback for %s is %s\n", crypto_tfm_alg_driver_name(&sktfm->base), - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base))); + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request; op->enginectx.op.prepare_request = NULL; @@ -359,7 +358,7 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) 
return 0; error_pm: pm_runtime_put_noidle(op->ce->dev); - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); return err; } @@ -371,7 +370,7 @@ void sun8i_ce_cipher_exit(struct crypto_tfm *tfm) memzero_explicit(op->key, op->keylen); kfree(op->key); } - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); pm_runtime_put_sync_suspend(op->ce->dev); } @@ -401,10 +400,10 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, @@ -426,8 +425,8 @@ int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 0e9eac397e1b..963645fe4adb 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -181,12 +181,14 @@ struct sun8i_ce_dev { /* * struct sun8i_cipher_req_ctx - context for a skcipher request - * @op_dir: direction (encrypt vs decrypt) for this request - * @flow: the flow to use for this request + * @op_dir: direction (encrypt vs decrypt) for this request + * @flow: the flow to use for this request + * @fallback_req: request struct for invoking the fallback skcipher TFM */ struct sun8i_cipher_req_ctx { u32 op_dir; int flow; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -202,7 +204,7 @@ struct sun8i_cipher_tfm_ctx { u32 *key; u32 keylen; struct sun8i_ce_dev *ce; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; /* From 44b591753731f12dcf5d8e72df69baa9f3c48921 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:31:56 +0300 Subject: [PATCH 082/157] crypto: sun8i-ss - permit asynchronous skcipher as fallback Even though the sun8i-ss driver implements asynchronous versions of ecb(aes) and cbc(aes), the fallbacks it allocates are required to be synchronous. Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. 
Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. Signed-off-by: Ard Biesheuvel Acked-by: Corentin Labbe Signed-off-by: Herbert Xu --- .../allwinner/sun8i-ss/sun8i-ss-cipher.c | 39 ++++++++++--------- drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h | 26 +++++++------ 2 files changed, 34 insertions(+), 31 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index c89cb2ee2496..7a131675a41c 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -73,7 +73,6 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); int err; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ss_alg_template *algt; @@ -81,15 +80,15 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); algt->stat_fb++; #endif - skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); - skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, areq->src, areq->dst, + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (rctx->op_dir & SS_DECRYPTION) - err = crypto_skcipher_decrypt(subreq); + err = crypto_skcipher_decrypt(&rctx->fallback_req); else - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } @@ -334,18 +333,20 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); op->ss = algt->ss; - sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + + dev_info(op->ss->dev, "Fallback for %s is %s\n", crypto_tfm_alg_driver_name(&sktfm->base), - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base))); + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request; op->enginectx.op.prepare_request = NULL; @@ -359,7 +360,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) return 0; error_pm: - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); return err; } @@ -371,7 +372,7 @@ void sun8i_ss_cipher_exit(struct crypto_tfm *tfm) memzero_explicit(op->key, op->keylen); kfree(op->key); } - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); 
pm_runtime_put_sync(op->ss->dev); } @@ -401,10 +402,10 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, @@ -427,8 +428,8 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h index 29c44f279112..0405767f1f7e 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h @@ -135,17 +135,18 @@ struct sun8i_ss_dev { /* * struct sun8i_cipher_req_ctx - context for a skcipher request - * @t_src: list of mapped SGs with their size - * @t_dst: list of mapped SGs with their size - * @p_key: DMA address of the key - * @p_iv: DMA address of the IV - * @method: current algorithm for this request - * @op_mode: op_mode for this request - * @op_dir: direction (encrypt vs decrypt) for this request - * @flow: the flow to use for this request - * @ivlen: size of biv - * @keylen: keylen for this request - * @biv: buffer which contain the IV + * @t_src: list of mapped SGs with their size + * @t_dst: list of mapped SGs with their size + * @p_key: DMA address of the key + * @p_iv: DMA address of the IV + * @method: current algorithm for this request + * @op_mode: op_mode for this request + * @op_dir: direction (encrypt vs decrypt) for this request + * @flow: the flow to use for this request + * @ivlen: size of biv + * @keylen: keylen for this request + * @biv: buffer which contain the IV + * @fallback_req: request struct for invoking the fallback skcipher TFM */ struct sun8i_cipher_req_ctx { struct sginfo t_src[MAX_SG]; @@ -159,6 +160,7 @@ struct sun8i_cipher_req_ctx { unsigned int ivlen; unsigned int keylen; void *biv; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -174,7 +176,7 @@ struct sun8i_cipher_tfm_ctx { u32 *key; u32 keylen; struct sun8i_ss_dev *ss; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; /* From 413b61ce0b4de0e55a9f8cf91bbce1ad7e0870cd Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:31:57 +0300 Subject: [PATCH 083/157] crypto: ccp - permit asynchronous skcipher as fallback Even though the ccp driver implements an asynchronous version of xts(aes), the fallback it allocates is required to be synchronous. 
Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. Signed-off-by: Ard Biesheuvel Acked-by: John Allen Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-aes-xts.c | 33 ++++++++++++------------- drivers/crypto/ccp/ccp-crypto.h | 4 ++- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 04b2517df955..959168a7ac59 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -98,7 +98,7 @@ static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, ctx->u.aes.key_len = key_len / 2; sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); - return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len); + return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len); } static int ccp_aes_xts_crypt(struct skcipher_request *req, @@ -145,20 +145,19 @@ static int ccp_aes_xts_crypt(struct skcipher_request *req, (ctx->u.aes.key_len != AES_KEYSIZE_256)) fallback = 1; if (fallback) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, - ctx->u.aes.tfm_skcipher); - /* Use the fallback to process the request for any * unsupported unit sizes or key sizes */ - skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - ret = encrypt ? crypto_skcipher_encrypt(subreq) : - crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); + skcipher_request_set_tfm(&rctx->fallback_req, + ctx->u.aes.tfm_skcipher); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + ret = encrypt ? 
crypto_skcipher_encrypt(&rctx->fallback_req) : + crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } @@ -198,13 +197,12 @@ static int ccp_aes_xts_decrypt(struct skcipher_request *req) static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; ctx->complete = ccp_aes_xts_complete; ctx->u.aes.key_len = 0; - fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0, - CRYPTO_ALG_ASYNC | + fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback_tfm)) { pr_warn("could not load fallback driver xts(aes)\n"); @@ -212,7 +210,8 @@ static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm) } ctx->u.aes.tfm_skcipher = fallback_tfm; - crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) + + crypto_skcipher_reqsize(fallback_tfm)); return 0; } @@ -221,7 +220,7 @@ static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher); + crypto_free_skcipher(ctx->u.aes.tfm_skcipher); } static int ccp_register_aes_xts_alg(struct list_head *head, diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 90a009e6b5c1..aed3d2192d01 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -89,7 +89,7 @@ static inline struct ccp_crypto_ahash_alg * /***** AES related defines *****/ struct ccp_aes_ctx { /* Fallback cipher for XTS with unsupported unit sizes */ - struct crypto_sync_skcipher *tfm_skcipher; + struct crypto_skcipher *tfm_skcipher; enum ccp_engine engine; enum ccp_aes_type type; @@ -121,6 +121,8 @@ struct ccp_aes_req_ctx { u8 rfc3686_iv[AES_BLOCK_SIZE]; struct ccp_cmd cmd; + + struct skcipher_request fallback_req; // keep at the end }; struct ccp_aes_cmac_req_ctx { From d8c6d1886c8f436da96c0d91010040ca7caae9ac Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:31:58 +0300 Subject: [PATCH 084/157] crypto: chelsio - permit asynchronous skcipher as fallback Even though the chelsio driver implements asynchronous versions of cbc(aes) and xts(aes), the fallbacks it allocates are required to be synchronous. Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. 
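
The chelsio conversion routes everything through a common chcr_cipher_fallback() helper; one step that every setkey path in this series also repeats is mirroring the transform's request flags onto the fallback before installing the key there. Roughly, reusing the hypothetical drv_* names from the sketch above (hardware key programming omitted):

static int drv_setkey(struct crypto_skcipher *tfm, const u8 *key,
		      unsigned int keylen)
{
	struct drv_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* keep constraints such as CRYPTO_TFM_REQ_FORBID_WEAK_KEYS in
	 * force when the software implementation does the work
	 */
	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}
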
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_algo.c | 57 +++++++++++----------------- drivers/crypto/chelsio/chcr_crypto.h | 3 +- 2 files changed, 25 insertions(+), 35 deletions(-) diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 4c2553672b6f..a6625b90fb1a 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -690,26 +690,22 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, return min(srclen, dstlen); } -static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher, - u32 flags, - struct scatterlist *src, - struct scatterlist *dst, - unsigned int nbytes, +static int chcr_cipher_fallback(struct crypto_skcipher *cipher, + struct skcipher_request *req, u8 *iv, unsigned short op_type) { + struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); int err; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher); + skcipher_request_set_tfm(&reqctx->fallback_req, cipher); + skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags, + req->base.complete, req->base.data); + skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst, + req->cryptlen, iv); - skcipher_request_set_sync_tfm(subreq, cipher); - skcipher_request_set_callback(subreq, flags, NULL, NULL); - skcipher_request_set_crypt(subreq, src, dst, - nbytes, iv); - - err = op_type ? crypto_skcipher_decrypt(subreq) : - crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) : + crypto_skcipher_encrypt(&reqctx->fallback_req); return err; @@ -924,11 +920,11 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher, { struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); - crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher, + crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ablkctx->sw_cipher, + crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen); + return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen); } static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, @@ -1206,13 +1202,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req, req); memcpy(req->iv, reqctx->init_iv, IV); atomic_inc(&adap->chcr_stats.fallback); - err = chcr_cipher_fallback(ablkctx->sw_cipher, - req->base.flags, - req->src, - req->dst, - req->cryptlen, - req->iv, - reqctx->op); + err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv, + reqctx->op); goto complete; } @@ -1341,11 +1332,7 @@ static int process_cipher(struct skcipher_request *req, chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); fallback: atomic_inc(&adap->chcr_stats.fallback); - err = chcr_cipher_fallback(ablkctx->sw_cipher, - req->base.flags, - req->src, - req->dst, - req->cryptlen, + err = chcr_cipher_fallback(ablkctx->sw_cipher, req, subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ? 
reqctx->iv : req->iv, @@ -1486,14 +1473,15 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm) struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); - ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0, + ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ablkctx->sw_cipher)) { pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ablkctx->sw_cipher); } init_completion(&ctx->cbc_aes_aio_done); - crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) + + crypto_skcipher_reqsize(ablkctx->sw_cipher)); return chcr_device_init(ctx); } @@ -1507,13 +1495,14 @@ static int chcr_rfc3686_init(struct crypto_skcipher *tfm) /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes)) * cannot be used as fallback in chcr_handle_cipher_response */ - ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0, + ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ablkctx->sw_cipher)) { pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ablkctx->sw_cipher); } - crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) + + crypto_skcipher_reqsize(ablkctx->sw_cipher)); return chcr_device_init(ctx); } @@ -1523,7 +1512,7 @@ static void chcr_exit_tfm(struct crypto_skcipher *tfm) struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); - crypto_free_sync_skcipher(ablkctx->sw_cipher); + crypto_free_skcipher(ablkctx->sw_cipher); } static int get_alg_config(struct algo_param *params, diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index b3fdbdc25acb..55a6631cdbee 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -171,7 +171,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm) } struct ablk_ctx { - struct crypto_sync_skcipher *sw_cipher; + struct crypto_skcipher *sw_cipher; __be32 key_ctx_hdr; unsigned int enckey_len; unsigned char ciph_mode; @@ -305,6 +305,7 @@ struct chcr_skcipher_req_ctx { u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN]; u16 txqidx; u16 rxqidx; + struct skcipher_request fallback_req; // keep at the end }; struct chcr_alg_template { From c9598d4e13ca0a6747235257a8e38cdef6973d8e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:31:59 +0300 Subject: [PATCH 085/157] crypto: mxs-dcp - permit asynchronous skcipher as fallback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Even though the mxs-dcp driver implements asynchronous versions of ecb(aes) and cbc(aes), the fallbacks it allocates are required to be synchronous. Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. 
Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. Signed-off-by: Ard Biesheuvel Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/mxs-dcp.c | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d84530293036..909a7eb748e3 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -97,7 +97,7 @@ struct dcp_async_ctx { unsigned int hot:1; /* Crypto-specific context */ - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; unsigned int key_len; uint8_t key[AES_KEYSIZE_128]; }; @@ -105,6 +105,7 @@ struct dcp_async_ctx { struct dcp_aes_req_ctx { unsigned int enc:1; unsigned int ecb:1; + struct skcipher_request fallback_req; // keep at the end }; struct dcp_sha_req_ctx { @@ -426,21 +427,20 @@ static int dcp_chan_thread_aes(void *data) static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm); - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); int ret; - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, + req->base.complete, req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst, req->cryptlen, req->iv); if (enc) - ret = crypto_skcipher_encrypt(subreq); + ret = crypto_skcipher_encrypt(&rctx->fallback_req); else - ret = crypto_skcipher_decrypt(subreq); - - skcipher_request_zero(subreq); + ret = crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } @@ -510,24 +510,25 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, * but is supported by in-kernel software implementation, we use * software fallback. 
*/ - crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(actx->fallback, + crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(actx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(actx->fallback, key, len); + return crypto_skcipher_setkey(actx->fallback, key, len); } static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm) { const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm)); struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *blk; + struct crypto_skcipher *blk; - blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(blk)) return PTR_ERR(blk); actx->fallback = blk; - crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) + + crypto_skcipher_reqsize(blk)); return 0; } @@ -535,7 +536,7 @@ static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm) { struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(actx->fallback); + crypto_free_skcipher(actx->fallback); } /* From dc6e71c9d9cfef98b95cb6e7e91ed2bb64b63e76 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:32:00 +0300 Subject: [PATCH 086/157] crypto: picoxcell - permit asynchronous skcipher as fallback Even though the picoxcell driver implements asynchronous versions of ecb(aes) and cbc(aes), the fallbacks it allocates are required to be synchronous. Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. Signed-off-by: Ard Biesheuvel Reviewed-by: Jamie Iles Signed-off-by: Herbert Xu --- drivers/crypto/picoxcell_crypto.c | 38 ++++++++++++++++++------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 7384e91c8b32..13503e16ce1d 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -86,6 +86,7 @@ struct spacc_req { dma_addr_t src_addr, dst_addr; struct spacc_ddt *src_ddt, *dst_ddt; void (*complete)(struct spacc_req *req); + struct skcipher_request fallback_req; // keep at the end }; struct spacc_aead { @@ -158,7 +159,7 @@ struct spacc_ablk_ctx { * The fallback cipher. If the operation can't be done in hardware, * fallback to a software version. */ - struct crypto_sync_skcipher *sw_cipher; + struct crypto_skcipher *sw_cipher; }; /* AEAD cipher context. */ @@ -792,13 +793,13 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, * Set the fallback transform to use the same request flags as * the hardware transform. 
*/ - crypto_sync_skcipher_clear_flags(ctx->sw_cipher, + crypto_skcipher_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ctx->sw_cipher, + crypto_skcipher_set_flags(ctx->sw_cipher, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); - err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len); + err = crypto_skcipher_setkey(ctx->sw_cipher, key, len); if (err) goto sw_setkey_failed; } @@ -900,7 +901,7 @@ static int spacc_ablk_do_fallback(struct skcipher_request *req, struct crypto_tfm *old_tfm = crypto_skcipher_tfm(crypto_skcipher_reqtfm(req)); struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm); - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher); + struct spacc_req *dev_req = skcipher_request_ctx(req); int err; /* @@ -908,13 +909,13 @@ static int spacc_ablk_do_fallback(struct skcipher_request *req, * the ciphering has completed, put the old transform back into the * request. */ - skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher); - skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, + skcipher_request_set_tfm(&dev_req->fallback_req, ctx->sw_cipher); + skcipher_request_set_callback(&dev_req->fallback_req, req->base.flags, + req->base.complete, req->base.data); + skcipher_request_set_crypt(&dev_req->fallback_req, req->src, req->dst, req->cryptlen, req->iv); - err = is_encrypt ? crypto_skcipher_encrypt(subreq) : - crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); + err = is_encrypt ? crypto_skcipher_encrypt(&dev_req->fallback_req) : + crypto_skcipher_decrypt(&dev_req->fallback_req); return err; } @@ -1007,19 +1008,24 @@ static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm) ctx->generic.flags = spacc_alg->type; ctx->generic.engine = engine; if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) { - ctx->sw_cipher = crypto_alloc_sync_skcipher( - alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK); + ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0, + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->sw_cipher)) { dev_warn(engine->dev, "failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ctx->sw_cipher); } + crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req) + + crypto_skcipher_reqsize(ctx->sw_cipher)); + } else { + /* take the size without the fallback skcipher_request at the end */ + crypto_skcipher_set_reqsize(tfm, offsetof(struct spacc_req, + fallback_req)); } + ctx->generic.key_offs = spacc_alg->key_offs; ctx->generic.iv_offs = spacc_alg->iv_offs; - crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req)); - return 0; } @@ -1027,7 +1033,7 @@ static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm) { struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->sw_cipher); + crypto_free_skcipher(ctx->sw_cipher); } static int spacc_ablk_encrypt(struct skcipher_request *req) From 90e2f782719fc5ad2af63096815a69c5320704cb Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:32:01 +0300 Subject: [PATCH 087/157] crypto: qce - permit asynchronous skcipher as fallback Even though the qce driver implements asynchronous versions of ecb(aes), cbc(aes) and xts(aes), the fallbacks it allocates are required to be synchronous.
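The conversion in each of these drivers follows the same shape: drop the sync wrapper type, embed a variably-sized skcipher_request at the end of the request context, and hand the outer request's completion routine through to the fallback. A condensed sketch, with hypothetical mydrv_* names standing in for the per-driver specifics:

#include <crypto/internal/skcipher.h>
#include <linux/err.h>

struct mydrv_ctx {
        struct crypto_skcipher *fallback;
};

struct mydrv_reqctx {
        unsigned long mode;                     /* driver-private state */
        struct skcipher_request fallback_req;   /* must stay at the end */
};

static int mydrv_init_tfm(struct crypto_skcipher *tfm)
{
        struct mydrv_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
                                              0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback))
                return PTR_ERR(ctx->fallback);

        /*
         * Reserve room for the fallback's own request context behind
         * ours.  Drivers that only sometimes allocate a fallback use
         * offsetof(struct mydrv_reqctx, fallback_req) in the other
         * case instead, as picoxcell does above.
         */
        crypto_skcipher_set_reqsize(tfm, sizeof(struct mydrv_reqctx) +
                                         crypto_skcipher_reqsize(ctx->fallback));
        return 0;
}

static int mydrv_fallback_crypt(struct skcipher_request *req, bool enc)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct mydrv_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct mydrv_reqctx *rctx = skcipher_request_ctx(req);

        skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        /* forward the caller's completion routine instead of blocking */
        skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
                                      req->base.complete, req->base.data);
        skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
                                   req->cryptlen, req->iv);
        return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
                     crypto_skcipher_decrypt(&rctx->fallback_req);
}

Keeping fallback_req as the last member is what lets crypto_skcipher_set_reqsize() reserve the extra, transform-dependent space directly behind the driver's own state.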
Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. While at it, remove the pointless memset() from qce_skcipher_init(), and remove the call to it from qce_skcipher_init_fallback(). Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/qce/cipher.h | 3 ++- drivers/crypto/qce/skcipher.c | 42 ++++++++++++++++++----------------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h index 7770660bc853..cffa9fc628ff 100644 --- a/drivers/crypto/qce/cipher.h +++ b/drivers/crypto/qce/cipher.h @@ -14,7 +14,7 @@ struct qce_cipher_ctx { u8 enc_key[QCE_MAX_KEY_SIZE]; unsigned int enc_keylen; - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; }; /** @@ -43,6 +43,7 @@ struct qce_cipher_reqctx { struct sg_table src_tbl; struct scatterlist *src_sg; unsigned int cryptlen; + struct skcipher_request fallback_req; // keep at the end }; static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm) diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index 9412433f3b21..a8147381b774 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -178,7 +178,7 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, break; } - ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); + ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); if (!ret) ctx->enc_keylen = keylen; return ret; @@ -235,16 +235,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) req->cryptlen <= aes_sw_max_len) || (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE && req->cryptlen % QCE_SECTOR_SIZE))) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - ret = encrypt ? crypto_skcipher_encrypt(subreq) : - crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + ret = encrypt ?
crypto_skcipher_encrypt(&rctx->fallback_req) : + crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } @@ -263,10 +262,9 @@ static int qce_skcipher_decrypt(struct skcipher_request *req) static int qce_skcipher_init(struct crypto_skcipher *tfm) { - struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - - memset(ctx, 0, sizeof(*ctx)); - crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx)); + /* take the size without the fallback skcipher_request at the end */ + crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx, + fallback_req)); return 0; } @@ -274,17 +272,21 @@ static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - qce_skcipher_init(tfm); - ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base), - 0, CRYPTO_ALG_NEED_FALLBACK); - return PTR_ERR_OR_ZERO(ctx->fallback); + ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base), + 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback)) + return PTR_ERR(ctx->fallback); + + crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) + + crypto_skcipher_reqsize(ctx->fallback)); + return 0; } static void qce_skcipher_exit(struct crypto_skcipher *tfm) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); } struct qce_skcipher_def { From 56ca499f1b58235d5b3f247a887cc636d849d351 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:32:02 +0300 Subject: [PATCH 088/157] crypto: sahara - permit asynchronous skcipher as fallback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Even though the sahara driver implements asynchronous versions of ecb(aes) and cbc(aes), the fallbacks it allocates are required to be synchronous. Given that SIMD based software implementations are usually asynchronous as well, even though they rarely complete asynchronously (this typically only happens in cases where the request was made from softirq context, while SIMD was already in use in the task context that it interrupted), these implementations are disregarded, and either the generic C version or another table based version implemented in assembler is selected instead. Since falling back to synchronous AES is not only a performance issue, but potentially a security issue as well (due to the fact that table based AES is not time invariant), let's fix this, by allocating an ordinary skcipher as the fallback, and invoke it with the completion routine that was given to the outer request. Signed-off-by: Ard Biesheuvel Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/sahara.c | 96 +++++++++++++++++++---------------------- 1 file changed, 45 insertions(+), 51 deletions(-) diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 466e30bd529c..0c8cb23ae708 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -146,11 +146,12 @@ struct sahara_ctx { /* AES-specific context */ int keylen; u8 key[AES_KEYSIZE_128]; - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; }; struct sahara_aes_reqctx { unsigned long mode; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -617,10 +618,10 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, /* * The requested key size is not supported by HW, do a fallback. 
*/ - crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & + crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); + return crypto_skcipher_setkey(ctx->fallback, key, keylen); } static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) @@ -651,21 +652,19 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) static int sahara_aes_ecb_encrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_encrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, FLAGS_ENCRYPT); @@ -673,21 +672,19 @@ static int sahara_aes_ecb_encrypt(struct skcipher_request *req) static int sahara_aes_ecb_decrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - err = crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_decrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, 0); @@ -695,21 +692,19 @@ static int sahara_aes_ecb_decrypt(struct skcipher_request *req) static int sahara_aes_cbc_encrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + 
req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_encrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); @@ -717,21 +712,19 @@ static int sahara_aes_cbc_encrypt(struct skcipher_request *req) static int sahara_aes_cbc_decrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - err = crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_decrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, FLAGS_CBC); @@ -742,14 +735,15 @@ static int sahara_aes_init_tfm(struct crypto_skcipher *tfm) const char *name = crypto_tfm_alg_name(&tfm->base); struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm); - ctx->fallback = crypto_alloc_sync_skcipher(name, 0, + ctx->fallback = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) { pr_err("Error allocating fallback algo %s\n", name); return PTR_ERR(ctx->fallback); } - crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) + + crypto_skcipher_reqsize(ctx->fallback)); return 0; } @@ -758,7 +752,7 @@ static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm) { struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); } static u32 sahara_sha_init_hdr(struct sahara_dev *dev, From f441ba2ad34102692b9836923776f017b40afc88 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Jul 2020 09:32:03 +0300 Subject: [PATCH 089/157] crypto: mediatek - use AES library for GCM key derivation The Mediatek accelerator driver calls into a dynamically allocated skcipher of the ctr(aes) variety to perform GCM key derivation, which involves AES encryption of a single block consisting of NUL bytes. There is no point in using the skcipher API for this, so use the AES library interface instead. 
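GCM's hash subkey is just the AES encryption of the all-zero block, so the whole derivation collapses into a few library calls. A sketch of the approach the patch below switches to (the helper name is illustrative, and CRYPTO_LIB_AES must be selected):

#include <crypto/aes.h>
#include <linux/string.h>

static int demo_derive_gcm_subkey(const u8 *key, unsigned int keylen,
                                  u8 hash[AES_BLOCK_SIZE])
{
        struct crypto_aes_ctx aes_ctx;
        int err;

        err = aes_expandkey(&aes_ctx, key, keylen);
        if (err)
                return err;

        /* H = E_K(0^128): encrypt the all-zero block in place */
        memset(hash, 0, AES_BLOCK_SIZE);
        aes_encrypt(&aes_ctx, hash, hash);

        /* scrub the expanded round keys from the stack */
        memzero_explicit(&aes_ctx, sizeof(aes_ctx));
        return 0;
}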
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 3 +- drivers/crypto/mediatek/mtk-aes.c | 63 ++++--------------------------- 2 files changed, 9 insertions(+), 57 deletions(-) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 7bc58bf99703..585ad584e421 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -758,10 +758,9 @@ config CRYPTO_DEV_ZYNQMP_AES config CRYPTO_DEV_MEDIATEK tristate "MediaTek's EIP97 Cryptographic Engine driver" depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST - select CRYPTO_AES + select CRYPTO_LIB_AES select CRYPTO_AEAD select CRYPTO_SKCIPHER - select CRYPTO_CTR select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 78d660d963e2..4ad3571ab6af 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c @@ -137,8 +137,6 @@ struct mtk_aes_gcm_ctx { u32 authsize; size_t textlen; - - struct crypto_skcipher *ctr; }; struct mtk_aes_drv { @@ -996,17 +994,8 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, u32 keylen) { struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead); - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); - struct crypto_skcipher *ctr = gctx->ctr; - struct { - u32 hash[4]; - u8 iv[8]; - - struct crypto_wait wait; - - struct scatterlist sg[1]; - struct skcipher_request req; - } *data; + u8 hash[AES_BLOCK_SIZE] __aligned(4) = {}; + struct crypto_aes_ctx aes_ctx; int err; switch (keylen) { @@ -1026,39 +1015,18 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, ctx->keylen = SIZE_IN_WORDS(keylen); - /* Same as crypto_gcm_setkey() from crypto/gcm.c */ - crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); - crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & - CRYPTO_TFM_REQ_MASK); - err = crypto_skcipher_setkey(ctr, key, keylen); + err = aes_expandkey(&aes_ctx, key, keylen); if (err) return err; - data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr), - GFP_KERNEL); - if (!data) - return -ENOMEM; - - crypto_init_wait(&data->wait); - sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE); - skcipher_request_set_tfm(&data->req, ctr); - skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | - CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &data->wait); - skcipher_request_set_crypt(&data->req, data->sg, data->sg, - AES_BLOCK_SIZE, data->iv); - - err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), - &data->wait); - if (err) - goto out; + aes_encrypt(&aes_ctx, hash, hash); + memzero_explicit(&aes_ctx, sizeof(aes_ctx)); mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen); - mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash, + mtk_aes_write_state_be(ctx->key + ctx->keylen, (const u32 *)hash, AES_BLOCK_SIZE); -out: - kzfree(data); - return err; + + return 0; } static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead, @@ -1095,32 +1063,17 @@ static int mtk_aes_gcm_init(struct crypto_aead *aead) { struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); - ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->ctr)) { - pr_err("Error allocating ctr(aes)\n"); - return PTR_ERR(ctx->ctr); - } - crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx)); ctx->base.start = mtk_aes_gcm_start; return 0; } -static void mtk_aes_gcm_exit(struct crypto_aead *aead) -{ - struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); - - crypto_free_skcipher(ctx->ctr); -} - 
static struct aead_alg aes_gcm_alg = { .setkey = mtk_aes_gcm_setkey, .setauthsize = mtk_aes_gcm_setauthsize, .encrypt = mtk_aes_gcm_encrypt, .decrypt = mtk_aes_gcm_decrypt, .init = mtk_aes_gcm_init, - .exit = mtk_aes_gcm_exit, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, From 06cc2afbbdf9a9e8df3e2f8db724997dd6e1b4ac Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 8 Jul 2020 12:41:13 +1000 Subject: [PATCH 090/157] crypto: lib/chacha20poly1305 - Add missing function declaration This patch adds a declaration for chacha20poly1305_selftest to silence a sparse warning. Signed-off-by: Herbert Xu --- include/crypto/chacha20poly1305.h | 2 ++ lib/crypto/chacha20poly1305.c | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h index 234ee28078ef..d2ac3ff7dc1e 100644 --- a/include/crypto/chacha20poly1305.h +++ b/include/crypto/chacha20poly1305.h @@ -45,4 +45,6 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len const u64 nonce, const u8 key[CHACHA20POLY1305_KEY_SIZE]); +bool chacha20poly1305_selftest(void); + #endif /* __CHACHA20POLY1305_H */ diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c index ad0699ce702f..431e04280332 100644 --- a/lib/crypto/chacha20poly1305.c +++ b/lib/crypto/chacha20poly1305.c @@ -21,8 +21,6 @@ #define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32)) -bool __init chacha20poly1305_selftest(void); - static void chacha_load_key(u32 *k, const u8 *in) { k[0] = get_unaligned_le32(in); From e79a31715193686e92dadb4caedfbb1f5de3659c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 8 Jul 2020 12:11:18 +0300 Subject: [PATCH 091/157] crypto: x86/chacha-sse3 - use unaligned loads for state array Due to the fact that the x86 port does not support allocating objects on the stack with an alignment that exceeds 8 bytes, we have a rather ugly hack in the x86 code for ChaCha to ensure that the state array is aligned to 16 bytes, allowing the SSE3 implementation of the algorithm to use aligned loads. Given that the performance benefit of using aligned loads appears to be limited (~0.25% for 1k blocks using tcrypt on a Core i7-8650U), and the fact that this hack has leaked into generic ChaCha code, let's just remove it.
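On the caller side, the hack and its removal are visible in the state declarations alone; a small contrast of the two forms, using the names from the diff below:

#include <crypto/chacha.h>
#include <linux/kernel.h>

static void demo_state_decl(void)
{
        /* old: over-allocate, then align by hand so movdqa is safe */
        u32 state_buf[16 + 2] __aligned(8);
        u32 *old_state = PTR_ALIGN(state_buf + 0, 16);

        /* new: movdqu accepts any address, natural alignment suffices */
        u32 state[CHACHA_STATE_WORDS] __aligned(8);

        (void)old_state;
        (void)state;
}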
Cc: Martin Willi Cc: Herbert Xu Cc: Eric Biggers Signed-off-by: Ard Biesheuvel Reviewed-by: Martin Willi Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/x86/crypto/chacha-ssse3-x86_64.S | 16 ++++++++-------- arch/x86/crypto/chacha_glue.c | 17 ++--------------- include/crypto/chacha.h | 4 ---- 3 files changed, 10 insertions(+), 27 deletions(-) diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S index a38ab2512a6f..ca1788bfee16 100644 --- a/arch/x86/crypto/chacha-ssse3-x86_64.S +++ b/arch/x86/crypto/chacha-ssse3-x86_64.S @@ -120,10 +120,10 @@ SYM_FUNC_START(chacha_block_xor_ssse3) FRAME_BEGIN # x0..3 = s0..3 - movdqa 0x00(%rdi),%xmm0 - movdqa 0x10(%rdi),%xmm1 - movdqa 0x20(%rdi),%xmm2 - movdqa 0x30(%rdi),%xmm3 + movdqu 0x00(%rdi),%xmm0 + movdqu 0x10(%rdi),%xmm1 + movdqu 0x20(%rdi),%xmm2 + movdqu 0x30(%rdi),%xmm3 movdqa %xmm0,%xmm8 movdqa %xmm1,%xmm9 movdqa %xmm2,%xmm10 @@ -205,10 +205,10 @@ SYM_FUNC_START(hchacha_block_ssse3) # %edx: nrounds FRAME_BEGIN - movdqa 0x00(%rdi),%xmm0 - movdqa 0x10(%rdi),%xmm1 - movdqa 0x20(%rdi),%xmm2 - movdqa 0x30(%rdi),%xmm3 + movdqu 0x00(%rdi),%xmm0 + movdqu 0x10(%rdi),%xmm1 + movdqu 0x20(%rdi),%xmm2 + movdqu 0x30(%rdi),%xmm3 mov %edx,%r8d call chacha_permute diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c index 22250091cdbe..e67a59130025 100644 --- a/arch/x86/crypto/chacha_glue.c +++ b/arch/x86/crypto/chacha_glue.c @@ -14,8 +14,6 @@ #include #include -#define CHACHA_STATE_ALIGN 16 - asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, unsigned int len, int nrounds); asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, @@ -124,8 +122,6 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src, void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) { - state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); - if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) { hchacha_block_generic(state, stream, nrounds); } else { @@ -138,8 +134,6 @@ EXPORT_SYMBOL(hchacha_block_arch); void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) { - state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); - chacha_init_generic(state, key, iv); } EXPORT_SYMBOL(chacha_init_arch); @@ -147,8 +141,6 @@ EXPORT_SYMBOL(chacha_init_arch); void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { - state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); - if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() || bytes <= CHACHA_BLOCK_SIZE) return chacha_crypt_generic(state, dst, src, bytes, nrounds); @@ -170,15 +162,12 @@ EXPORT_SYMBOL(chacha_crypt_arch); static int chacha_simd_stream_xor(struct skcipher_request *req, const struct chacha_ctx *ctx, const u8 *iv) { - u32 *state, state_buf[16 + 2] __aligned(8); + u32 state[CHACHA_STATE_WORDS] __aligned(8); struct skcipher_walk walk; int err; err = skcipher_walk_virt(&walk, req, false); - BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); - state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); - chacha_init_generic(state, ctx->key, iv); while (walk.nbytes > 0) { @@ -217,12 +206,10 @@ static int xchacha_simd(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 *state, state_buf[16 + 2] __aligned(8); + u32 state[CHACHA_STATE_WORDS] __aligned(8); struct chacha_ctx subctx; u8 real_iv[16]; - BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); - state = PTR_ALIGN(state_buf + 0, 
CHACHA_STATE_ALIGN); chacha_init_generic(state, ctx->key, req->iv); if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) { diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index 2676f4fbd4c1..3a1c72fdb7cf 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h @@ -25,11 +25,7 @@ #define CHACHA_BLOCK_SIZE 64 #define CHACHAPOLY_IV_SIZE 12 -#ifdef CONFIG_X86_64 -#define CHACHA_STATE_WORDS ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32)) -#else #define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32)) -#endif /* 192-bit nonce, then 64-bit stream position */ #define XCHACHA_IV_SIZE 32 From 6e8f972fc3fb03797abb88feee6704dccada93f6 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 8 Jul 2020 09:39:39 -0700 Subject: [PATCH 092/157] crypto: sparc - rename sha256 to sha256_alg To avoid a naming collision when we add a sha256() library function, rename the "sha256" static variable in sha256_glue.c to "sha256_alg". For consistency, also rename "sha224" to "sha224_alg". Reported-by: kernel test robot Cc: sparclinux@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/sparc/crypto/sha256_glue.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c index 286bc8ecf15b..ca2547df9652 100644 --- a/arch/sparc/crypto/sha256_glue.c +++ b/arch/sparc/crypto/sha256_glue.c @@ -156,7 +156,7 @@ static int sha256_sparc64_import(struct shash_desc *desc, const void *in) return 0; } -static struct shash_alg sha256 = { +static struct shash_alg sha256_alg = { .digestsize = SHA256_DIGEST_SIZE, .init = sha256_sparc64_init, .update = sha256_sparc64_update, @@ -174,7 +174,7 @@ static struct shash_alg sha256 = { } }; -static struct shash_alg sha224 = { +static struct shash_alg sha224_alg = { .digestsize = SHA224_DIGEST_SIZE, .init = sha224_sparc64_init, .update = sha256_sparc64_update, @@ -206,13 +206,13 @@ static bool __init sparc64_has_sha256_opcode(void) static int __init sha256_sparc64_mod_init(void) { if (sparc64_has_sha256_opcode()) { - int ret = crypto_register_shash(&sha224); + int ret = crypto_register_shash(&sha224_alg); if (ret < 0) return ret; - ret = crypto_register_shash(&sha256); + ret = crypto_register_shash(&sha256_alg); if (ret < 0) { - crypto_unregister_shash(&sha224); + crypto_unregister_shash(&sha224_alg); return ret; } @@ -225,8 +225,8 @@ static int __init sha256_sparc64_mod_init(void) static void __exit sha256_sparc64_mod_fini(void) { - crypto_unregister_shash(&sha224); - crypto_unregister_shash(&sha256); + crypto_unregister_shash(&sha224_alg); + crypto_unregister_shash(&sha256_alg); } module_init(sha256_sparc64_mod_init); From 9ea9c58b40a441a0babef8c615acedcfb3733919 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 8 Jul 2020 09:39:40 -0700 Subject: [PATCH 093/157] crypto: lib/sha256 - add sha256() function Add a function sha256() which computes a SHA-256 digest in one step, combining sha256_init() + sha256_update() + sha256_final(). This is similar to how we also have blake2s(). 
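In caller terms the new helper is a drop-in for the three-step sequence; a small sketch (the demo function name is illustrative):

#include <crypto/sha.h>

static void demo_digest(const u8 *data, unsigned int len,
                        u8 out[SHA256_DIGEST_SIZE])
{
        struct sha256_state sctx;

        /* before: streaming interface, three calls */
        sha256_init(&sctx);
        sha256_update(&sctx, data, len);
        sha256_final(&sctx, out);

        /* after: one call, same result */
        sha256(data, len, out);
}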
Reviewed-by: Ard Biesheuvel Tested-by: Hans de Goede Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/sha.h | 1 + lib/crypto/sha256.c | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 10753ff71d46..4ff3da816630 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -147,6 +147,7 @@ static inline void sha256_init(struct sha256_state *sctx) } void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len); void sha256_final(struct sha256_state *sctx, u8 *out); +void sha256(const u8 *data, unsigned int len, u8 *out); static inline void sha224_init(struct sha256_state *sctx) { diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c index 2e621697c5c3..2321f6cb322f 100644 --- a/lib/crypto/sha256.c +++ b/lib/crypto/sha256.c @@ -280,4 +280,14 @@ void sha224_final(struct sha256_state *sctx, u8 *out) } EXPORT_SYMBOL(sha224_final); +void sha256(const u8 *data, unsigned int len, u8 *out) +{ + struct sha256_state sctx; + + sha256_init(&sctx); + sha256_update(&sctx, data, len); + sha256_final(&sctx, out); +} +EXPORT_SYMBOL(sha256); + MODULE_LICENSE("GPL"); From 2164960df8f531a06805fdd28d41758ce7691f75 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 8 Jul 2020 09:39:41 -0700 Subject: [PATCH 094/157] efi: use sha256() instead of open coding Now that there's a function that calculates the SHA-256 digest of a buffer in one step, use it instead of sha256_init() + sha256_update() + sha256_final(). Reviewed-by: Ard Biesheuvel Tested-by: Hans de Goede Cc: linux-efi@vger.kernel.org Cc: Ard Biesheuvel Cc: Hans de Goede Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/firmware/efi/embedded-firmware.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c index a1b199de9006..e97a9c9d010c 100644 --- a/drivers/firmware/efi/embedded-firmware.c +++ b/drivers/firmware/efi/embedded-firmware.c @@ -37,9 +37,8 @@ static const struct dmi_system_id * const embedded_fw_table[] = { static int __init efi_check_md_for_embedded_firmware( efi_memory_desc_t *md, const struct efi_embedded_fw_desc *desc) { - struct sha256_state sctx; struct efi_embedded_fw *fw; - u8 sha256[32]; + u8 hash[32]; u64 i, size; u8 *map; @@ -54,10 +53,8 @@ static int __init efi_check_md_for_embedded_firmware( if (memcmp(map + i, desc->prefix, EFI_EMBEDDED_FW_PREFIX_LEN)) continue; - sha256_init(&sctx); - sha256_update(&sctx, map + i, desc->length); - sha256_final(&sctx, sha256); - if (memcmp(sha256, desc->sha256, 32) == 0) + sha256(map + i, desc->length, hash); + if (memcmp(hash, desc->sha256, 32) == 0) break; } if ((i + desc->length) > size) { From 5a7a0d9400677a5e88fdb362c4d32171384068f2 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 8 Jul 2020 09:39:42 -0700 Subject: [PATCH 095/157] mptcp: use sha256() instead of open coding Now that there's a function that calculates the SHA-256 digest of a buffer in one step, use it instead of sha256_init() + sha256_update() + sha256_final(). 
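The HMAC-SHA256 construction that the mptcp code open-codes reduces to two one-shot calls; a sketch with a simplified fixed 32-byte key (the real code below XORs two 64-bit keys into the pads, and the scratch buffer limits msg to SHA256_DIGEST_SIZE bytes):

#include <crypto/sha.h>
#include <linux/string.h>

static void demo_hmac_sha256(const u8 key[SHA256_DIGEST_SIZE],
                             const u8 *msg, int len, u8 *out)
{
        u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE];
        int i;

        /* inner hash: H((K ^ ipad) || msg), key zero-padded to a block */
        memset(input, 0x36, SHA256_BLOCK_SIZE);
        for (i = 0; i < SHA256_DIGEST_SIZE; i++)
                input[i] ^= key[i];
        memcpy(&input[SHA256_BLOCK_SIZE], msg, len);

        /* emit the inner digest into the tail of 'input' for reuse */
        sha256(input, SHA256_BLOCK_SIZE + len, &input[SHA256_BLOCK_SIZE]);

        /* outer hash: H((K ^ opad) || inner digest) */
        memset(input, 0x5C, SHA256_BLOCK_SIZE);
        for (i = 0; i < SHA256_DIGEST_SIZE; i++)
                input[i] ^= key[i];
        sha256(input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE, out);
}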
Reviewed-by: Ard Biesheuvel Acked-by: Matthieu Baerts Cc: mptcp@lists.01.org Cc: Mat Martineau Cc: Matthieu Baerts Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- net/mptcp/crypto.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/net/mptcp/crypto.c b/net/mptcp/crypto.c index 3d980713a9e2..82bd2b54d741 100644 --- a/net/mptcp/crypto.c +++ b/net/mptcp/crypto.c @@ -32,11 +32,8 @@ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn) { __be32 mptcp_hashed_key[SHA256_DIGEST_WORDS]; __be64 input = cpu_to_be64(key); - struct sha256_state state; - sha256_init(&state); - sha256_update(&state, (__force u8 *)&input, sizeof(input)); - sha256_final(&state, (u8 *)mptcp_hashed_key); + sha256((__force u8 *)&input, sizeof(input), (u8 *)mptcp_hashed_key); if (token) *token = be32_to_cpu(mptcp_hashed_key[0]); @@ -47,7 +44,6 @@ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn) void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) { u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE]; - struct sha256_state state; u8 key1be[8]; u8 key2be[8]; int i; @@ -67,13 +63,10 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) memcpy(&input[SHA256_BLOCK_SIZE], msg, len); - sha256_init(&state); - sha256_update(&state, input, SHA256_BLOCK_SIZE + len); - /* emit sha256(K1 || msg) on the second input block, so we can * reuse 'input' for the last hashing */ - sha256_final(&state, &input[SHA256_BLOCK_SIZE]); + sha256(input, SHA256_BLOCK_SIZE + len, &input[SHA256_BLOCK_SIZE]); /* Prepare second part of hmac */ memset(input, 0x5C, SHA256_BLOCK_SIZE); @@ -82,9 +75,7 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) for (i = 0; i < 8; i++) input[i + 8] ^= key2be[i]; - sha256_init(&state); - sha256_update(&state, input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE); - sha256_final(&state, (u8 *)hmac); + sha256(input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE, hmac); } #ifdef CONFIG_MPTCP_HMAC_TEST From 9d4cafa5a04aeba68cc12de47ffcd975079e2f83 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 8 Jul 2020 09:39:43 -0700 Subject: [PATCH 096/157] ASoC: cros_ec_codec: use sha256() instead of open coding Now that there's a function that calculates the SHA-256 digest of a buffer in one step, use it instead of sha256_init() + sha256_update() + sha256_final(). Also simplify the code by inlining calculate_sha256() into its caller and switching a debug log statement to use %*phN instead of bin2hex(). 
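The %*phN specifier that replaces bin2hex() takes the byte count as the field width and prints the buffer as contiguous hex digits with no separators, so no temporary string is needed; a sketch of the resulting one-liner (the device pointer and function name are illustrative):

#include <crypto/sha.h>
#include <linux/device.h>

static void demo_log_digest(struct device *dev,
                            const u8 digest[SHA256_DIGEST_SIZE])
{
        /* prints e.g. "hash=9f86d081..." straight from the binary buffer */
        dev_dbg(dev, "hash=%*phN\n", SHA256_DIGEST_SIZE, digest);
}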
Acked-by: Tzung-Bi Shih Reviewed-by: Ard Biesheuvel Cc: alsa-devel@alsa-project.org Cc: Ard Biesheuvel Cc: Cheng-Yi Chiang Cc: Enric Balletbo i Serra Cc: Guenter Roeck Cc: Tzung-Bi Shih Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- sound/soc/codecs/cros_ec_codec.c | 27 ++------------------------- 1 file changed, 2 insertions(+), 25 deletions(-) diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c index 8d45c628e988..ab009c7dfdf4 100644 --- a/sound/soc/codecs/cros_ec_codec.c +++ b/sound/soc/codecs/cros_ec_codec.c @@ -103,28 +103,6 @@ error: return ret; } -static int calculate_sha256(struct cros_ec_codec_priv *priv, - uint8_t *buf, uint32_t size, uint8_t *digest) -{ - struct sha256_state sctx; - - sha256_init(&sctx); - sha256_update(&sctx, buf, size); - sha256_final(&sctx, digest); - -#ifdef DEBUG - { - char digest_str[65]; - - bin2hex(digest_str, digest, 32); - digest_str[64] = 0; - dev_dbg(priv->dev, "hash=%s\n", digest_str); - } -#endif - - return 0; -} - static int dmic_get_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -782,9 +760,8 @@ static int wov_hotword_model_put(struct snd_kcontrol *kcontrol, if (IS_ERR(buf)) return PTR_ERR(buf); - ret = calculate_sha256(priv, buf, size, digest); - if (ret) - goto leave; + sha256(buf, size, digest); + dev_dbg(priv->dev, "hash=%*phN\n", SHA256_DIGEST_SIZE, digest); p.cmd = EC_CODEC_WOV_GET_LANG; ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV, From 8832cfd3a036ebf7b4a5d9423b60e8d8b5a2f339 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Thu, 9 Jul 2020 12:35:39 +0200 Subject: [PATCH 097/157] hwrng: ks-sa - Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Herbert Xu --- drivers/char/hw_random/ks-sa-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c index 001617033d6a..8f1d47ff9799 100644 --- a/drivers/char/hw_random/ks-sa-rng.c +++ b/drivers/char/hw_random/ks-sa-rng.c @@ -2,7 +2,7 @@ /* * Random Number Generator driver for the Keystone SOC * - * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com * * Authors: Sandeep Nair * Vitaly Andrianov From 2c2e18369f62da8217be0fbca3b94160da75cba3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 9 Jul 2020 22:44:04 +1000 Subject: [PATCH 098/157] crypto: ccp - Silence strncpy warning This patch kills an strncpy by using strscpy instead. The name would be silently truncated if it is too long. 
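The practical difference between the two copies fits in a few lines: strscpy() always NUL-terminates the destination and reports truncation via -E2BIG, where strncpy() would neither terminate nor report. A sketch (buffer size handling and the warning are illustrative):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void demo_copy_name(char *dst, size_t dst_size, const char *src)
{
        ssize_t n;

        n = strscpy(dst, src, dst_size);
        if (n == -E2BIG)
                /* dst now holds a truncated but NUL-terminated copy */
                pr_warn("name '%s' truncated\n", src);
}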
Signed-off-by: Herbert Xu Acked-by: John Allen Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-sha.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index b0cc2bd73af8..2bc29736fa45 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "ccp-crypto.h" @@ -424,7 +425,7 @@ static int ccp_register_hmac_alg(struct list_head *head, *ccp_alg = *base_alg; INIT_LIST_HEAD(&ccp_alg->entry); - strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); + strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); alg = &ccp_alg->alg; alg->setkey = ccp_sha_setkey; From d7866e503bdca854e6bc64521f030404d7cab642 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Thu, 9 Jul 2020 17:08:57 +0200 Subject: [PATCH 099/157] crypto: x86 - Remove include/asm/inst.h The current minimum required version of binutils is 2.23, which supports the PSHUFB, PCLMULQDQ, PEXTRD, AESKEYGENASSIST, AESIMC, AESENC, AESENCLAST, AESDEC, AESDECLAST and MOVQ instruction mnemonics. Substitute the macros from include/asm/inst.h with proper instruction mnemonics in the various assembly files in the x86/crypto directory, and remove the now-unneeded file. The patch was tested by calculating and comparing sha256sum hashes of stripped object files before and after the patch, to be sure that the executable code didn't change. Signed-off-by: Uros Bizjak CC: Herbert Xu CC: "David S. Miller" CC: Thomas Gleixner CC: Ingo Molnar CC: Borislav Petkov CC: "H. Peter Anvin" Signed-off-by: Herbert Xu --- arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 1 - arch/x86/crypto/aesni-intel_asm.S | 733 +++++++++++----------- arch/x86/crypto/aesni-intel_avx-x86_64.S | 1 - arch/x86/crypto/crc32-pclmul_asm.S | 47 +- arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 5 +- arch/x86/crypto/ghash-clmulni-intel_asm.S | 17 +- arch/x86/include/asm/inst.h | 311 --------- 7 files changed, 399 insertions(+), 716 deletions(-) delete mode 100644 arch/x86/include/asm/inst.h diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 494a3bda8487..3f0fc7dd87d7 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -63,7 +63,6 @@ */ #include -#include #define VMOVDQ vmovdqu diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 7d4298e6d4cb..1852b19a73a0 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -26,7 +26,6 @@ */ #include -#include #include #include @@ -201,7 +200,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff mov \SUBKEY, %r12 movdqu (%r12), \TMP3 movdqa SHUF_MASK(%rip), \TMP2 - PSHUFB_XMM \TMP2, \TMP3 + pshufb \TMP2, \TMP3 # precompute HashKey<<1 mod poly from the HashKey (required for GHASH) @@ -263,7 +262,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff movdqu %xmm0, OrigIV(%arg2) # ctx_data.orig_IV = iv movdqa SHUF_MASK(%rip), %xmm2 - PSHUFB_XMM %xmm2, %xmm0 + pshufb %xmm2, %xmm0 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7 @@ -347,7 +346,7 @@ _zero_cipher_left_\@: paddd ONE(%rip), %xmm0 # INCR CNT to get Yn movdqu %xmm0, CurCount(%arg2) movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10, %xmm0 + pshufb %xmm10, %xmm0 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) movdqu %xmm0, PBlockEncKey(%arg2) @@ -377,7 +376,7 @@ _large_enough_update_\@: #
get the appropriate shuffle mask movdqu (%r12), %xmm2 # shift right 16-r13 bytes - PSHUFB_XMM %xmm2, %xmm1 + pshufb %xmm2, %xmm1 _data_read_\@: lea ALL_F+16(%rip), %r12 @@ -393,12 +392,12 @@ _data_read_\@: .ifc \operation, dec pand %xmm1, %xmm2 movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10 ,%xmm2 + pshufb %xmm10 ,%xmm2 pxor %xmm2, %xmm8 .else movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10,%xmm0 + pshufb %xmm10,%xmm0 pxor %xmm0, %xmm8 .endif @@ -408,17 +407,17 @@ _data_read_\@: # GHASH computation for the last <16 byte block movdqa SHUF_MASK(%rip), %xmm10 # shuffle xmm0 back to output as ciphertext - PSHUFB_XMM %xmm10, %xmm0 + pshufb %xmm10, %xmm0 .endif # Output %r13 bytes - MOVQ_R64_XMM %xmm0, %rax + movq %xmm0, %rax cmp $8, %r13 jle _less_than_8_bytes_left_\@ mov %rax, (%arg3 , %r11, 1) add $8, %r11 psrldq $8, %xmm0 - MOVQ_R64_XMM %xmm0, %rax + movq %xmm0, %rax sub $8, %r13 _less_than_8_bytes_left_\@: mov %al, (%arg3, %r11, 1) @@ -449,7 +448,7 @@ _partial_done\@: movd %r12d, %xmm15 # len(A) in %xmm15 mov InLen(%arg2), %r12 shl $3, %r12 # len(C) in bits (*128) - MOVQ_R64_XMM %r12, %xmm1 + movq %r12, %xmm1 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000 pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C) @@ -457,7 +456,7 @@ _partial_done\@: GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10, %xmm8 + pshufb %xmm10, %xmm8 movdqu OrigIV(%arg2), %xmm0 # %xmm0 = Y0 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0) @@ -470,7 +469,7 @@ _return_T_\@: cmp $8, %r11 jl _T_4_\@ _T_8_\@: - MOVQ_R64_XMM %xmm0, %rax + movq %xmm0, %rax mov %rax, (%r10) add $8, %r10 sub $8, %r11 @@ -518,9 +517,9 @@ _return_T_done_\@: pshufd $78, \HK, \TMP3 pxor \GH, \TMP2 # TMP2 = a1+a0 pxor \HK, \TMP3 # TMP3 = b1+b0 - PCLMULQDQ 0x11, \HK, \TMP1 # TMP1 = a1*b1 - PCLMULQDQ 0x00, \HK, \GH # GH = a0*b0 - PCLMULQDQ 0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0) + pclmulqdq $0x11, \HK, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x00, \HK, \GH # GH = a0*b0 + pclmulqdq $0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0) pxor \GH, \TMP2 pxor \TMP1, \TMP2 # TMP2 = (a0*b0)+(a1*b0) movdqa \TMP2, \TMP3 @@ -570,7 +569,7 @@ _return_T_done_\@: cmp $8, \DLEN jl _read_lt8_\@ mov (\DPTR), %rax - MOVQ_R64_XMM %rax, \XMMDst + movq %rax, \XMMDst sub $8, \DLEN jz _done_read_partial_block_\@ xor %eax, %eax @@ -579,7 +578,7 @@ _read_next_byte_\@: mov 7(\DPTR, \DLEN, 1), %al dec \DLEN jnz _read_next_byte_\@ - MOVQ_R64_XMM %rax, \XMM1 + movq %rax, \XMM1 pslldq $8, \XMM1 por \XMM1, \XMMDst jmp _done_read_partial_block_\@ @@ -590,7 +589,7 @@ _read_next_byte_lt8_\@: mov -1(\DPTR, \DLEN, 1), %al dec \DLEN jnz _read_next_byte_lt8_\@ - MOVQ_R64_XMM %rax, \XMMDst + movq %rax, \XMMDst _done_read_partial_block_\@: .endm @@ -608,7 +607,7 @@ _done_read_partial_block_\@: jl _get_AAD_rest\@ _get_AAD_blocks\@: movdqu (%r10), \TMP7 - PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data + pshufb %xmm14, \TMP7 # byte-reflect the AAD data pxor \TMP7, \TMP6 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5 add $16, %r10 @@ -624,7 +623,7 @@ _get_AAD_rest\@: je _get_AAD_done\@ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7 - PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data + pshufb %xmm14, \TMP7 # byte-reflect the AAD data pxor \TMP6, \TMP7 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5 movdqu \TMP7, \TMP6 @@ -667,7 +666,7 @@ _data_read_\@: # Finished reading in data # r16-r13 is the number of bytes in plaintext mod 16) add %r13, %r12 movdqu (%r12), %xmm2 # get the 
appropriate shuffle mask - PSHUFB_XMM %xmm2, %xmm9 # shift right r13 bytes + pshufb %xmm2, %xmm9 # shift right r13 bytes .ifc \operation, dec movdqa %xmm1, %xmm3 @@ -689,8 +688,8 @@ _no_extra_mask_1_\@: pand %xmm1, %xmm3 movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10, %xmm3 - PSHUFB_XMM %xmm2, %xmm3 + pshufb %xmm10, %xmm3 + pshufb %xmm2, %xmm3 pxor %xmm3, \AAD_HASH cmp $0, %r10 @@ -724,8 +723,8 @@ _no_extra_mask_2_\@: pand %xmm1, %xmm9 movdqa SHUF_MASK(%rip), %xmm1 - PSHUFB_XMM %xmm1, %xmm9 - PSHUFB_XMM %xmm2, %xmm9 + pshufb %xmm1, %xmm9 + pshufb %xmm2, %xmm9 pxor %xmm9, \AAD_HASH cmp $0, %r10 @@ -744,8 +743,8 @@ _encode_done_\@: movdqa SHUF_MASK(%rip), %xmm10 # shuffle xmm9 back to output as ciphertext - PSHUFB_XMM %xmm10, %xmm9 - PSHUFB_XMM %xmm2, %xmm9 + pshufb %xmm10, %xmm9 + pshufb %xmm2, %xmm9 .endif # output encrypted Bytes cmp $0, %r10 @@ -759,14 +758,14 @@ _partial_fill_\@: mov \PLAIN_CYPH_LEN, %r13 _count_set_\@: movdqa %xmm9, %xmm0 - MOVQ_R64_XMM %xmm0, %rax + movq %xmm0, %rax cmp $8, %r13 jle _less_than_8_bytes_left_\@ mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1) add $8, \DATA_OFFSET psrldq $8, %xmm0 - MOVQ_R64_XMM %xmm0, %rax + movq %xmm0, %rax sub $8, %r13 _less_than_8_bytes_left_\@: movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1) @@ -810,7 +809,7 @@ _partial_block_done_\@: .else MOVADQ \XMM0, %xmm\index .endif - PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap + pshufb %xmm14, %xmm\index # perform a 16 byte swap pxor \TMP2, %xmm\index .endr lea 0x10(%arg1),%r10 @@ -821,7 +820,7 @@ _partial_block_done_\@: aes_loop_initial_\@: MOVADQ (%r10),\TMP1 .irpc index, \i_seq - AESENC \TMP1, %xmm\index + aesenc \TMP1, %xmm\index .endr add $16,%r10 sub $1,%eax @@ -829,7 +828,7 @@ aes_loop_initial_\@: MOVADQ (%r10), \TMP1 .irpc index, \i_seq - AESENCLAST \TMP1, %xmm\index # Last Round + aesenclast \TMP1, %xmm\index # Last Round .endr .irpc index, \i_seq movdqu (%arg4 , %r11, 1), \TMP1 @@ -841,7 +840,7 @@ aes_loop_initial_\@: .ifc \operation, dec movdqa \TMP1, %xmm\index .endif - PSHUFB_XMM %xmm14, %xmm\index + pshufb %xmm14, %xmm\index # prepare plaintext/ciphertext for GHASH computation .endr @@ -876,19 +875,19 @@ aes_loop_initial_\@: MOVADQ ONE(%RIP),\TMP1 paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM1 - PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap + pshufb %xmm14, \XMM1 # perform a 16 byte swap paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM2 - PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap + pshufb %xmm14, \XMM2 # perform a 16 byte swap paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM3 - PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap + pshufb %xmm14, \XMM3 # perform a 16 byte swap paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM4 - PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap + pshufb %xmm14, \XMM4 # perform a 16 byte swap MOVADQ 0(%arg1),\TMP1 pxor \TMP1, \XMM1 @@ -897,17 +896,17 @@ aes_loop_initial_\@: pxor \TMP1, \XMM4 .irpc index, 1234 # do 4 rounds movaps 0x10*\index(%arg1), \TMP1 - AESENC \TMP1, \XMM1 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 .endr .irpc index, 56789 # do next 5 rounds movaps 0x10*\index(%arg1), \TMP1 - AESENC \TMP1, \XMM1 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 .endr lea 0xa0(%arg1),%r10 mov keysize,%eax @@ -918,7 +917,7 @@ aes_loop_initial_\@: aes_loop_pre_\@: MOVADQ (%r10),\TMP2 .irpc index, 1234 - AESENC \TMP2, 
%xmm\index + aesenc \TMP2, %xmm\index .endr add $16,%r10 sub $1,%eax @@ -926,10 +925,10 @@ aes_loop_pre_\@: aes_loop_pre_done\@: MOVADQ (%r10), \TMP2 - AESENCLAST \TMP2, \XMM1 - AESENCLAST \TMP2, \XMM2 - AESENCLAST \TMP2, \XMM3 - AESENCLAST \TMP2, \XMM4 + aesenclast \TMP2, \XMM1 + aesenclast \TMP2, \XMM2 + aesenclast \TMP2, \XMM3 + aesenclast \TMP2, \XMM4 movdqu 16*0(%arg4 , %r11 , 1), \TMP1 pxor \TMP1, \XMM1 .ifc \operation, dec @@ -961,12 +960,12 @@ aes_loop_pre_done\@: .endif add $64, %r11 - PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap + pshufb %xmm14, \XMM1 # perform a 16 byte swap pxor \XMMDst, \XMM1 # combine GHASHed value with the corresponding ciphertext - PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap - PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap - PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap + pshufb %xmm14, \XMM2 # perform a 16 byte swap + pshufb %xmm14, \XMM3 # perform a 16 byte swap + pshufb %xmm14, \XMM4 # perform a 16 byte swap _initial_blocks_done\@: @@ -994,7 +993,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pxor \XMM5, \TMP6 paddd ONE(%rip), \XMM0 # INCR CNT movdqu HashKey_4(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1 movdqa \XMM0, \XMM1 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM2 @@ -1002,51 +1001,51 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM0, \XMM3 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM4 - PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap - PCLMULQDQ 0x00, \TMP5, \XMM5 # XMM5 = a0*b0 - PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + pshufb %xmm15, \XMM1 # perform a 16 byte swap + pclmulqdq $0x00, \TMP5, \XMM5 # XMM5 = a0*b0 + pshufb %xmm15, \XMM2 # perform a 16 byte swap + pshufb %xmm15, \XMM3 # perform a 16 byte swap + pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor (%arg1), \XMM1 pxor (%arg1), \XMM2 pxor (%arg1), \XMM3 pxor (%arg1), \XMM4 movdqu HashKey_4_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) movaps 0x10(%arg1), \TMP1 - AESENC \TMP1, \XMM1 # Round 1 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 # Round 1 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 movaps 0x20(%arg1), \TMP1 - AESENC \TMP1, \XMM1 # Round 2 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 # Round 2 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 movdqa \XMM6, \TMP1 pshufd $78, \XMM6, \TMP2 pxor \XMM6, \TMP2 movdqu HashKey_3(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 movaps 0x30(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 3 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM6 # XMM6 = a0*b0 + aesenc \TMP3, \XMM1 # Round 3 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM6 # XMM6 = a0*b0 movaps 0x40(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 4 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 4 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 movdqu HashKey_3_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x50(%arg1), \TMP3 - 
AESENC \TMP3, \XMM1 # Round 5 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 5 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM6, \XMM5 @@ -1058,25 +1057,25 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation # Multiply TMP5 * HashKey using karatsuba - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x60(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 6 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM7 # XMM7 = a0*b0 + aesenc \TMP3, \XMM1 # Round 6 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM7 # XMM7 = a0*b0 movaps 0x70(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 7 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 7 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 movdqu HashKey_2_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x80(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 8 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 8 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM7, \XMM5 @@ -1089,13 +1088,13 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pshufd $78, \XMM8, \TMP2 pxor \XMM8, \TMP2 movdqu HashKey(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x90(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 9 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 + aesenc \TMP3, \XMM1 # Round 9 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM8 # XMM8 = a0*b0 lea 0xa0(%arg1),%r10 mov keysize,%eax shr $2,%eax # 128->4, 192->6, 256->8 @@ -1105,7 +1104,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation aes_loop_par_enc\@: MOVADQ (%r10),\TMP3 .irpc index, 1234 - AESENC \TMP3, %xmm\index + aesenc \TMP3, %xmm\index .endr add $16,%r10 sub $1,%eax @@ -1113,12 +1112,12 @@ aes_loop_par_enc\@: aes_loop_par_enc_done\@: MOVADQ (%r10), \TMP3 - AESENCLAST \TMP3, \XMM1 # Round 10 - AESENCLAST \TMP3, \XMM2 - AESENCLAST \TMP3, \XMM3 - AESENCLAST \TMP3, \XMM4 + aesenclast \TMP3, \XMM1 # Round 10 + aesenclast \TMP3, \XMM2 + aesenclast \TMP3, \XMM3 + aesenclast \TMP3, \XMM4 movdqu HashKey_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqu (%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK movdqu 16(%arg4,%r11,1), \TMP3 @@ -1131,10 +1130,10 @@ aes_loop_par_enc_done\@: movdqu \XMM2, 16(%arg3,%r11,1) # Write to the ciphertext buffer movdqu \XMM3, 32(%arg3,%r11,1) # Write to the ciphertext buffer movdqu \XMM4, 48(%arg3,%r11,1) # Write to the ciphertext buffer - PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + pshufb %xmm15, \XMM1 # perform a 16 byte swap + pshufb %xmm15, \XMM2 # perform a 16 byte swap + pshufb %xmm15, \XMM3 # perform a 16 byte swap + 
pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor \TMP4, \TMP1 pxor \XMM8, \XMM5 @@ -1202,7 +1201,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pxor \XMM5, \TMP6 paddd ONE(%rip), \XMM0 # INCR CNT movdqu HashKey_4(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1 movdqa \XMM0, \XMM1 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM2 @@ -1210,51 +1209,51 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM0, \XMM3 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM4 - PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap - PCLMULQDQ 0x00, \TMP5, \XMM5 # XMM5 = a0*b0 - PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + pshufb %xmm15, \XMM1 # perform a 16 byte swap + pclmulqdq $0x00, \TMP5, \XMM5 # XMM5 = a0*b0 + pshufb %xmm15, \XMM2 # perform a 16 byte swap + pshufb %xmm15, \XMM3 # perform a 16 byte swap + pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor (%arg1), \XMM1 pxor (%arg1), \XMM2 pxor (%arg1), \XMM3 pxor (%arg1), \XMM4 movdqu HashKey_4_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) movaps 0x10(%arg1), \TMP1 - AESENC \TMP1, \XMM1 # Round 1 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 # Round 1 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 movaps 0x20(%arg1), \TMP1 - AESENC \TMP1, \XMM1 # Round 2 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 # Round 2 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 movdqa \XMM6, \TMP1 pshufd $78, \XMM6, \TMP2 pxor \XMM6, \TMP2 movdqu HashKey_3(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 movaps 0x30(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 3 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM6 # XMM6 = a0*b0 + aesenc \TMP3, \XMM1 # Round 3 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM6 # XMM6 = a0*b0 movaps 0x40(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 4 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 4 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 movdqu HashKey_3_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x50(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 5 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 5 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM6, \XMM5 @@ -1266,25 +1265,25 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation # Multiply TMP5 * HashKey using karatsuba - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x60(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 6 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM7 # XMM7 = a0*b0 + aesenc \TMP3, \XMM1 # Round 6 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM7 # XMM7 = a0*b0 movaps 0x70(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 7 - AESENC \TMP3, \XMM2 - 
AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 7 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 movdqu HashKey_2_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x80(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 8 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 8 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM7, \XMM5 @@ -1297,13 +1296,13 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pshufd $78, \XMM8, \TMP2 pxor \XMM8, \TMP2 movdqu HashKey(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x90(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 9 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 + aesenc \TMP3, \XMM1 # Round 9 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM8 # XMM8 = a0*b0 lea 0xa0(%arg1),%r10 mov keysize,%eax shr $2,%eax # 128->4, 192->6, 256->8 @@ -1313,7 +1312,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation aes_loop_par_dec\@: MOVADQ (%r10),\TMP3 .irpc index, 1234 - AESENC \TMP3, %xmm\index + aesenc \TMP3, %xmm\index .endr add $16,%r10 sub $1,%eax @@ -1321,12 +1320,12 @@ aes_loop_par_dec\@: aes_loop_par_dec_done\@: MOVADQ (%r10), \TMP3 - AESENCLAST \TMP3, \XMM1 # last round - AESENCLAST \TMP3, \XMM2 - AESENCLAST \TMP3, \XMM3 - AESENCLAST \TMP3, \XMM4 + aesenclast \TMP3, \XMM1 # last round + aesenclast \TMP3, \XMM2 + aesenclast \TMP3, \XMM3 + aesenclast \TMP3, \XMM4 movdqu HashKey_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqu (%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK movdqu \XMM1, (%arg3,%r11,1) # Write to plaintext buffer @@ -1343,10 +1342,10 @@ aes_loop_par_dec_done\@: pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK movdqu \XMM4, 48(%arg3,%r11,1) # Write to plaintext buffer movdqa \TMP3, \XMM4 - PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + pshufb %xmm15, \XMM1 # perform a 16 byte swap + pshufb %xmm15, \XMM2 # perform a 16 byte swap + pshufb %xmm15, \XMM3 # perform a 16 byte swap + pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor \TMP4, \TMP1 pxor \XMM8, \XMM5 @@ -1402,10 +1401,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst pshufd $78, \XMM1, \TMP2 pxor \XMM1, \TMP2 movdqu HashKey_4(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 - PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 + pclmulqdq $0x11, \TMP5, \TMP6 # TMP6 = a1*b1 + pclmulqdq $0x00, \TMP5, \XMM1 # XMM1 = a0*b0 movdqu HashKey_4_k(%arg2), \TMP4 - PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqa \XMM1, \XMMDst movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 @@ -1415,10 +1414,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst pshufd $78, \XMM2, \TMP2 pxor \XMM2, \TMP2 movdqu HashKey_3(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 - PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x00, \TMP5, \XMM2 # XMM2 = a0*b0 movdqu 
HashKey_3_k(%arg2), \TMP4 - PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM2, \XMMDst pxor \TMP2, \XMM1 @@ -1430,10 +1429,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst pshufd $78, \XMM3, \TMP2 pxor \XMM3, \TMP2 movdqu HashKey_2(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 - PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x00, \TMP5, \XMM3 # XMM3 = a0*b0 movdqu HashKey_2_k(%arg2), \TMP4 - PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM3, \XMMDst pxor \TMP2, \XMM1 # results accumulated in TMP6, XMMDst, XMM1 @@ -1443,10 +1442,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst pshufd $78, \XMM4, \TMP2 pxor \XMM4, \TMP2 movdqu HashKey(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 - PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x00, \TMP5, \XMM4 # XMM4 = a0*b0 movdqu HashKey_k(%arg2), \TMP4 - PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM4, \XMMDst pxor \XMM1, \TMP2 @@ -1504,13 +1503,13 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst _esb_loop_\@: MOVADQ (%r10),\TMP1 - AESENC \TMP1,\XMM0 + aesenc \TMP1,\XMM0 add $16,%r10 sub $1,%eax jnz _esb_loop_\@ MOVADQ (%r10),\TMP1 - AESENCLAST \TMP1,\XMM0 + aesenclast \TMP1,\XMM0 .endm /***************************************************************************** * void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. @@ -1849,72 +1848,72 @@ SYM_FUNC_START(aesni_set_key) movups 0x10(UKEYP), %xmm2 # other user key movaps %xmm2, (TKEYP) add $0x10, TKEYP - AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1 + aeskeygenassist $0x1, %xmm2, %xmm1 # round 1 call _key_expansion_256a - AESKEYGENASSIST 0x1 %xmm0 %xmm1 + aeskeygenassist $0x1, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2 + aeskeygenassist $0x2, %xmm2, %xmm1 # round 2 call _key_expansion_256a - AESKEYGENASSIST 0x2 %xmm0 %xmm1 + aeskeygenassist $0x2, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3 + aeskeygenassist $0x4, %xmm2, %xmm1 # round 3 call _key_expansion_256a - AESKEYGENASSIST 0x4 %xmm0 %xmm1 + aeskeygenassist $0x4, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4 + aeskeygenassist $0x8, %xmm2, %xmm1 # round 4 call _key_expansion_256a - AESKEYGENASSIST 0x8 %xmm0 %xmm1 + aeskeygenassist $0x8, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5 + aeskeygenassist $0x10, %xmm2, %xmm1 # round 5 call _key_expansion_256a - AESKEYGENASSIST 0x10 %xmm0 %xmm1 + aeskeygenassist $0x10, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6 + aeskeygenassist $0x20, %xmm2, %xmm1 # round 6 call _key_expansion_256a - AESKEYGENASSIST 0x20 %xmm0 %xmm1 + aeskeygenassist $0x20, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7 + aeskeygenassist $0x40, %xmm2, %xmm1 # round 7 call _key_expansion_256a jmp .Ldec_key .Lenc_key192: movq 0x10(UKEYP), %xmm2 # other user key - AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1 + aeskeygenassist $0x1, %xmm2, %xmm1 # round 1 call _key_expansion_192a - AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2 + aeskeygenassist $0x2, %xmm2, %xmm1 # round 2 call _key_expansion_192b - AESKEYGENASSIST 0x4 %xmm2 
%xmm1 # round 3 + aeskeygenassist $0x4, %xmm2, %xmm1 # round 3 call _key_expansion_192a - AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4 + aeskeygenassist $0x8, %xmm2, %xmm1 # round 4 call _key_expansion_192b - AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5 + aeskeygenassist $0x10, %xmm2, %xmm1 # round 5 call _key_expansion_192a - AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6 + aeskeygenassist $0x20, %xmm2, %xmm1 # round 6 call _key_expansion_192b - AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7 + aeskeygenassist $0x40, %xmm2, %xmm1 # round 7 call _key_expansion_192a - AESKEYGENASSIST 0x80 %xmm2 %xmm1 # round 8 + aeskeygenassist $0x80, %xmm2, %xmm1 # round 8 call _key_expansion_192b jmp .Ldec_key .Lenc_key128: - AESKEYGENASSIST 0x1 %xmm0 %xmm1 # round 1 + aeskeygenassist $0x1, %xmm0, %xmm1 # round 1 call _key_expansion_128 - AESKEYGENASSIST 0x2 %xmm0 %xmm1 # round 2 + aeskeygenassist $0x2, %xmm0, %xmm1 # round 2 call _key_expansion_128 - AESKEYGENASSIST 0x4 %xmm0 %xmm1 # round 3 + aeskeygenassist $0x4, %xmm0, %xmm1 # round 3 call _key_expansion_128 - AESKEYGENASSIST 0x8 %xmm0 %xmm1 # round 4 + aeskeygenassist $0x8, %xmm0, %xmm1 # round 4 call _key_expansion_128 - AESKEYGENASSIST 0x10 %xmm0 %xmm1 # round 5 + aeskeygenassist $0x10, %xmm0, %xmm1 # round 5 call _key_expansion_128 - AESKEYGENASSIST 0x20 %xmm0 %xmm1 # round 6 + aeskeygenassist $0x20, %xmm0, %xmm1 # round 6 call _key_expansion_128 - AESKEYGENASSIST 0x40 %xmm0 %xmm1 # round 7 + aeskeygenassist $0x40, %xmm0, %xmm1 # round 7 call _key_expansion_128 - AESKEYGENASSIST 0x80 %xmm0 %xmm1 # round 8 + aeskeygenassist $0x80, %xmm0, %xmm1 # round 8 call _key_expansion_128 - AESKEYGENASSIST 0x1b %xmm0 %xmm1 # round 9 + aeskeygenassist $0x1b, %xmm0, %xmm1 # round 9 call _key_expansion_128 - AESKEYGENASSIST 0x36 %xmm0 %xmm1 # round 10 + aeskeygenassist $0x36, %xmm0, %xmm1 # round 10 call _key_expansion_128 .Ldec_key: sub $0x10, TKEYP @@ -1927,7 +1926,7 @@ SYM_FUNC_START(aesni_set_key) .align 4 .Ldec_key_loop: movaps (KEYP), %xmm0 - AESIMC %xmm0 %xmm1 + aesimc %xmm0, %xmm1 movaps %xmm1, (UKEYP) add $0x10, KEYP sub $0x10, UKEYP @@ -1988,37 +1987,37 @@ SYM_FUNC_START_LOCAL(_aesni_enc1) je .Lenc192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps -0x50(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE .align 4 .Lenc192: movaps -0x40(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps -0x30(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE .align 4 .Lenc128: movaps -0x20(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps -0x10(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps (TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x10(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x20(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x30(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x40(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x50(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x60(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x70(TKEYP), KEY - AESENCLAST KEY STATE + aesenclast KEY, STATE ret SYM_FUNC_END(_aesni_enc1) @@ -2054,79 +2053,79 @@ SYM_FUNC_START_LOCAL(_aesni_enc4) je .L4enc192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps -0x50(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc 
KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 #.align 4 .L4enc192: movaps -0x40(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps -0x30(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 #.align 4 .L4enc128: movaps -0x20(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps -0x10(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps (TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x10(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x20(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x30(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x40(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x50(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x60(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x70(TKEYP), KEY - AESENCLAST KEY STATE1 # last round - AESENCLAST KEY STATE2 - AESENCLAST KEY STATE3 - AESENCLAST KEY STATE4 + aesenclast KEY, STATE1 # last round + aesenclast KEY, STATE2 + aesenclast KEY, STATE3 + aesenclast KEY, STATE4 ret SYM_FUNC_END(_aesni_enc4) @@ -2178,37 +2177,37 @@ SYM_FUNC_START_LOCAL(_aesni_dec1) je .Ldec192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps -0x50(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE .align 4 .Ldec192: movaps -0x40(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps -0x30(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE .align 4 .Ldec128: movaps -0x20(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps -0x10(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps (TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x10(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x20(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x30(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x40(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x50(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x60(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x70(TKEYP), KEY - AESDECLAST KEY STATE + aesdeclast KEY, STATE ret SYM_FUNC_END(_aesni_dec1) @@ -2244,79 +2243,79 @@ SYM_FUNC_START_LOCAL(_aesni_dec4) je 
.L4dec192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps -0x50(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 .align 4 .L4dec192: movaps -0x40(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps -0x30(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 .align 4 .L4dec128: movaps -0x20(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps -0x10(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps (TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x10(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x20(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x30(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x40(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x50(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x60(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x70(TKEYP), KEY - AESDECLAST KEY STATE1 # last round - AESDECLAST KEY STATE2 - AESDECLAST KEY STATE3 - AESDECLAST KEY STATE4 + aesdeclast KEY, STATE1 # last round + aesdeclast KEY, STATE2 + aesdeclast KEY, STATE3 + aesdeclast KEY, STATE4 ret SYM_FUNC_END(_aesni_dec4) @@ -2599,10 +2598,10 @@ SYM_FUNC_END(aesni_cbc_dec) SYM_FUNC_START_LOCAL(_aesni_inc_init) movaps .Lbswap_mask, BSWAP_MASK movaps IV, CTR - PSHUFB_XMM BSWAP_MASK CTR + pshufb BSWAP_MASK, CTR mov $1, TCTR_LOW - MOVQ_R64_XMM TCTR_LOW INC - MOVQ_R64_XMM CTR TCTR_LOW + movq TCTR_LOW, INC + movq CTR, TCTR_LOW ret SYM_FUNC_END(_aesni_inc_init) @@ -2630,7 +2629,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc) psrldq $8, INC .Linc_low: movaps CTR, IV - PSHUFB_XMM BSWAP_MASK IV + pshufb BSWAP_MASK, IV ret SYM_FUNC_END(_aesni_inc) diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index 0cea33295287..5fee47956f3b 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -120,7 +120,6 @@ 
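#
# [Editorial note, not part of the patch] The _aesni_inc_init hunk above
# contains the one conversion that is not purely textual: MOVQ_R64_XMM
# chose its opcode by inspecting the operand types (0x7e for xmm to
# general-purpose register, 0x6e for the reverse, per the macro deleted
# further below), and the native movq infers the direction the same way:
#
#     movq TCTR_LOW, INC      # general-purpose register to xmm
#     movq CTR, TCTR_LOW      # xmm to general-purpose register
#
# (TCTR_LOW, INC, and CTR are register aliases defined earlier in
# aesni-intel_asm.S.) With every call site converted, the <asm/inst.h>
# includes in this and the following files can simply be dropped.
#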
## #include -#include # constants in mergeable sections, linker can reorder and merge .section .rodata.cst16.POLY, "aM", @progbits, 16 diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S index 9fd28ff65bc2..6e7d4c4d3208 100644 --- a/arch/x86/crypto/crc32-pclmul_asm.S +++ b/arch/x86/crypto/crc32-pclmul_asm.S @@ -38,7 +38,6 @@ */ #include -#include .section .rodata @@ -129,17 +128,17 @@ loop_64:/* 64 bytes Full cache line folding */ #ifdef __x86_64__ movdqa %xmm4, %xmm8 #endif - PCLMULQDQ 00, CONSTANT, %xmm1 - PCLMULQDQ 00, CONSTANT, %xmm2 - PCLMULQDQ 00, CONSTANT, %xmm3 + pclmulqdq $0x00, CONSTANT, %xmm1 + pclmulqdq $0x00, CONSTANT, %xmm2 + pclmulqdq $0x00, CONSTANT, %xmm3 #ifdef __x86_64__ - PCLMULQDQ 00, CONSTANT, %xmm4 + pclmulqdq $0x00, CONSTANT, %xmm4 #endif - PCLMULQDQ 0x11, CONSTANT, %xmm5 - PCLMULQDQ 0x11, CONSTANT, %xmm6 - PCLMULQDQ 0x11, CONSTANT, %xmm7 + pclmulqdq $0x11, CONSTANT, %xmm5 + pclmulqdq $0x11, CONSTANT, %xmm6 + pclmulqdq $0x11, CONSTANT, %xmm7 #ifdef __x86_64__ - PCLMULQDQ 0x11, CONSTANT, %xmm8 + pclmulqdq $0x11, CONSTANT, %xmm8 #endif pxor %xmm5, %xmm1 pxor %xmm6, %xmm2 @@ -149,8 +148,8 @@ loop_64:/* 64 bytes Full cache line folding */ #else /* xmm8 unsupported for x32 */ movdqa %xmm4, %xmm5 - PCLMULQDQ 00, CONSTANT, %xmm4 - PCLMULQDQ 0x11, CONSTANT, %xmm5 + pclmulqdq $0x00, CONSTANT, %xmm4 + pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm4 #endif @@ -172,20 +171,20 @@ less_64:/* Folding cache line into 128bit */ prefetchnta (BUF) movdqa %xmm1, %xmm5 - PCLMULQDQ 0x00, CONSTANT, %xmm1 - PCLMULQDQ 0x11, CONSTANT, %xmm5 + pclmulqdq $0x00, CONSTANT, %xmm1 + pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor %xmm2, %xmm1 movdqa %xmm1, %xmm5 - PCLMULQDQ 0x00, CONSTANT, %xmm1 - PCLMULQDQ 0x11, CONSTANT, %xmm5 + pclmulqdq $0x00, CONSTANT, %xmm1 + pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor %xmm3, %xmm1 movdqa %xmm1, %xmm5 - PCLMULQDQ 0x00, CONSTANT, %xmm1 - PCLMULQDQ 0x11, CONSTANT, %xmm5 + pclmulqdq $0x00, CONSTANT, %xmm1 + pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor %xmm4, %xmm1 @@ -193,8 +192,8 @@ less_64:/* Folding cache line into 128bit */ jb fold_64 loop_16:/* Folding rest buffer into 128bit */ movdqa %xmm1, %xmm5 - PCLMULQDQ 0x00, CONSTANT, %xmm1 - PCLMULQDQ 0x11, CONSTANT, %xmm5 + pclmulqdq $0x00, CONSTANT, %xmm1 + pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor (BUF), %xmm1 sub $0x10, LEN @@ -205,7 +204,7 @@ loop_16:/* Folding rest buffer into 128bit */ fold_64: /* perform the last 64 bit fold, also adds 32 zeroes * to the input stream */ - PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */ + pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */ psrldq $0x08, %xmm1 pxor CONSTANT, %xmm1 @@ -220,7 +219,7 @@ fold_64: #endif psrldq $0x04, %xmm2 pand %xmm3, %xmm1 - PCLMULQDQ 0x00, CONSTANT, %xmm1 + pclmulqdq $0x00, CONSTANT, %xmm1 pxor %xmm2, %xmm1 /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */ @@ -231,11 +230,11 @@ fold_64: #endif movdqa %xmm1, %xmm2 pand %xmm3, %xmm1 - PCLMULQDQ 0x10, CONSTANT, %xmm1 + pclmulqdq $0x10, CONSTANT, %xmm1 pand %xmm3, %xmm1 - PCLMULQDQ 0x00, CONSTANT, %xmm1 + pclmulqdq $0x00, CONSTANT, %xmm1 pxor %xmm2, %xmm1 - PEXTRD 0x01, %xmm1, %eax + pextrd $0x01, %xmm1, %eax ret SYM_FUNC_END(crc32_pclmul_le_16) diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index 8501ec4532f4..d84c8bf64f02 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -43,7 +43,6 
@@ * SOFTWARE. */ -#include #include #include @@ -225,10 +224,10 @@ LABEL crc_ %i subq %rax, tmp # tmp -= rax*24 movq crc_init, %xmm1 # CRC for block 1 - PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2 + pclmulqdq $0x00, %xmm0, %xmm1 # Multiply by K2 movq crc1, %xmm2 # CRC for block 2 - PCLMULQDQ 0x10, %xmm0, %xmm2 # Multiply by K1 + pclmulqdq $0x10, %xmm0, %xmm2 # Multiply by K1 pxor %xmm2,%xmm1 movq %xmm1, %rax diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S index bb9735fbb865..99ac25e18e09 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S @@ -14,7 +14,6 @@ */ #include -#include #include .section .rodata.cst16.bswap_mask, "aM", @progbits, 16 @@ -51,9 +50,9 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble) pxor DATA, T2 pxor SHASH, T3 - PCLMULQDQ 0x00 SHASH DATA # DATA = a0 * b0 - PCLMULQDQ 0x11 SHASH T1 # T1 = a1 * b1 - PCLMULQDQ 0x00 T3 T2 # T2 = (a1 + a0) * (b1 + b0) + pclmulqdq $0x00, SHASH, DATA # DATA = a0 * b0 + pclmulqdq $0x11, SHASH, T1 # T1 = a1 * b1 + pclmulqdq $0x00, T3, T2 # T2 = (a1 + a0) * (b1 + b0) pxor DATA, T2 pxor T1, T2 # T2 = a0 * b1 + a1 * b0 @@ -95,9 +94,9 @@ SYM_FUNC_START(clmul_ghash_mul) movups (%rdi), DATA movups (%rsi), SHASH movaps .Lbswap_mask, BSWAP - PSHUFB_XMM BSWAP DATA + pshufb BSWAP, DATA call __clmul_gf128mul_ble - PSHUFB_XMM BSWAP DATA + pshufb BSWAP, DATA movups DATA, (%rdi) FRAME_END ret @@ -114,18 +113,18 @@ SYM_FUNC_START(clmul_ghash_update) movaps .Lbswap_mask, BSWAP movups (%rdi), DATA movups (%rcx), SHASH - PSHUFB_XMM BSWAP DATA + pshufb BSWAP, DATA .align 4 .Lupdate_loop: movups (%rsi), IN1 - PSHUFB_XMM BSWAP IN1 + pshufb BSWAP, IN1 pxor IN1, DATA call __clmul_gf128mul_ble sub $16, %rdx add $16, %rsi cmp $16, %rdx jge .Lupdate_loop - PSHUFB_XMM BSWAP DATA + pshufb BSWAP, DATA movups DATA, (%rdi) .Lupdate_just_ret: FRAME_END diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h deleted file mode 100644 index f5a796da07f8..000000000000 --- a/arch/x86/include/asm/inst.h +++ /dev/null @@ -1,311 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Generate .byte code for some instructions not supported by old - * binutils. 
- */ -#ifndef X86_ASM_INST_H -#define X86_ASM_INST_H - -#ifdef __ASSEMBLY__ - -#define REG_NUM_INVALID 100 - -#define REG_TYPE_R32 0 -#define REG_TYPE_R64 1 -#define REG_TYPE_XMM 2 -#define REG_TYPE_INVALID 100 - - .macro R32_NUM opd r32 - \opd = REG_NUM_INVALID - .ifc \r32,%eax - \opd = 0 - .endif - .ifc \r32,%ecx - \opd = 1 - .endif - .ifc \r32,%edx - \opd = 2 - .endif - .ifc \r32,%ebx - \opd = 3 - .endif - .ifc \r32,%esp - \opd = 4 - .endif - .ifc \r32,%ebp - \opd = 5 - .endif - .ifc \r32,%esi - \opd = 6 - .endif - .ifc \r32,%edi - \opd = 7 - .endif -#ifdef CONFIG_X86_64 - .ifc \r32,%r8d - \opd = 8 - .endif - .ifc \r32,%r9d - \opd = 9 - .endif - .ifc \r32,%r10d - \opd = 10 - .endif - .ifc \r32,%r11d - \opd = 11 - .endif - .ifc \r32,%r12d - \opd = 12 - .endif - .ifc \r32,%r13d - \opd = 13 - .endif - .ifc \r32,%r14d - \opd = 14 - .endif - .ifc \r32,%r15d - \opd = 15 - .endif -#endif - .endm - - .macro R64_NUM opd r64 - \opd = REG_NUM_INVALID -#ifdef CONFIG_X86_64 - .ifc \r64,%rax - \opd = 0 - .endif - .ifc \r64,%rcx - \opd = 1 - .endif - .ifc \r64,%rdx - \opd = 2 - .endif - .ifc \r64,%rbx - \opd = 3 - .endif - .ifc \r64,%rsp - \opd = 4 - .endif - .ifc \r64,%rbp - \opd = 5 - .endif - .ifc \r64,%rsi - \opd = 6 - .endif - .ifc \r64,%rdi - \opd = 7 - .endif - .ifc \r64,%r8 - \opd = 8 - .endif - .ifc \r64,%r9 - \opd = 9 - .endif - .ifc \r64,%r10 - \opd = 10 - .endif - .ifc \r64,%r11 - \opd = 11 - .endif - .ifc \r64,%r12 - \opd = 12 - .endif - .ifc \r64,%r13 - \opd = 13 - .endif - .ifc \r64,%r14 - \opd = 14 - .endif - .ifc \r64,%r15 - \opd = 15 - .endif -#endif - .endm - - .macro XMM_NUM opd xmm - \opd = REG_NUM_INVALID - .ifc \xmm,%xmm0 - \opd = 0 - .endif - .ifc \xmm,%xmm1 - \opd = 1 - .endif - .ifc \xmm,%xmm2 - \opd = 2 - .endif - .ifc \xmm,%xmm3 - \opd = 3 - .endif - .ifc \xmm,%xmm4 - \opd = 4 - .endif - .ifc \xmm,%xmm5 - \opd = 5 - .endif - .ifc \xmm,%xmm6 - \opd = 6 - .endif - .ifc \xmm,%xmm7 - \opd = 7 - .endif - .ifc \xmm,%xmm8 - \opd = 8 - .endif - .ifc \xmm,%xmm9 - \opd = 9 - .endif - .ifc \xmm,%xmm10 - \opd = 10 - .endif - .ifc \xmm,%xmm11 - \opd = 11 - .endif - .ifc \xmm,%xmm12 - \opd = 12 - .endif - .ifc \xmm,%xmm13 - \opd = 13 - .endif - .ifc \xmm,%xmm14 - \opd = 14 - .endif - .ifc \xmm,%xmm15 - \opd = 15 - .endif - .endm - - .macro REG_TYPE type reg - R32_NUM reg_type_r32 \reg - R64_NUM reg_type_r64 \reg - XMM_NUM reg_type_xmm \reg - .if reg_type_r64 <> REG_NUM_INVALID - \type = REG_TYPE_R64 - .elseif reg_type_r32 <> REG_NUM_INVALID - \type = REG_TYPE_R32 - .elseif reg_type_xmm <> REG_NUM_INVALID - \type = REG_TYPE_XMM - .else - \type = REG_TYPE_INVALID - .endif - .endm - - .macro PFX_OPD_SIZE - .byte 0x66 - .endm - - .macro PFX_REX opd1 opd2 W=0 - .if ((\opd1 | \opd2) & 8) || \W - .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3) - .endif - .endm - - .macro MODRM mod opd1 opd2 - .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3) - .endm - - .macro PSHUFB_XMM xmm1 xmm2 - XMM_NUM pshufb_opd1 \xmm1 - XMM_NUM pshufb_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX pshufb_opd1 pshufb_opd2 - .byte 0x0f, 0x38, 0x00 - MODRM 0xc0 pshufb_opd1 pshufb_opd2 - .endm - - .macro PCLMULQDQ imm8 xmm1 xmm2 - XMM_NUM clmul_opd1 \xmm1 - XMM_NUM clmul_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX clmul_opd1 clmul_opd2 - .byte 0x0f, 0x3a, 0x44 - MODRM 0xc0 clmul_opd1 clmul_opd2 - .byte \imm8 - .endm - - .macro PEXTRD imm8 xmm gpr - R32_NUM extrd_opd1 \gpr - XMM_NUM extrd_opd2 \xmm - PFX_OPD_SIZE - PFX_REX extrd_opd1 extrd_opd2 - .byte 0x0f, 0x3a, 0x16 - MODRM 0xc0 extrd_opd1 extrd_opd2 - .byte \imm8 - .endm - - 
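#
# [Editorial note, not part of the patch] The deleted macros above
# hand-assemble their instructions: XMM_NUM/R32_NUM map register names to
# numbers, PFX_OPD_SIZE emits the mandatory 0x66 prefix, PFX_REX adds a
# REX prefix when a high register is involved, and MODRM packs the
# register-direct ModRM byte. Working through one example,
#
#     PSHUFB_XMM %xmm2, %xmm1
#
# emits 0x66, then the opcode bytes 0x0f, 0x38, 0x00, then ModRM
# 0xc0 | 2 | (1 << 3) = 0xca, exactly the encoding the assembler now
# produces for "pshufb %xmm2, %xmm1" (no REX byte is needed here since
# both registers are below %xmm8).
#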
.macro AESKEYGENASSIST rcon xmm1 xmm2 - XMM_NUM aeskeygen_opd1 \xmm1 - XMM_NUM aeskeygen_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aeskeygen_opd1 aeskeygen_opd2 - .byte 0x0f, 0x3a, 0xdf - MODRM 0xc0 aeskeygen_opd1 aeskeygen_opd2 - .byte \rcon - .endm - - .macro AESIMC xmm1 xmm2 - XMM_NUM aesimc_opd1 \xmm1 - XMM_NUM aesimc_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aesimc_opd1 aesimc_opd2 - .byte 0x0f, 0x38, 0xdb - MODRM 0xc0 aesimc_opd1 aesimc_opd2 - .endm - - .macro AESENC xmm1 xmm2 - XMM_NUM aesenc_opd1 \xmm1 - XMM_NUM aesenc_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aesenc_opd1 aesenc_opd2 - .byte 0x0f, 0x38, 0xdc - MODRM 0xc0 aesenc_opd1 aesenc_opd2 - .endm - - .macro AESENCLAST xmm1 xmm2 - XMM_NUM aesenclast_opd1 \xmm1 - XMM_NUM aesenclast_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aesenclast_opd1 aesenclast_opd2 - .byte 0x0f, 0x38, 0xdd - MODRM 0xc0 aesenclast_opd1 aesenclast_opd2 - .endm - - .macro AESDEC xmm1 xmm2 - XMM_NUM aesdec_opd1 \xmm1 - XMM_NUM aesdec_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aesdec_opd1 aesdec_opd2 - .byte 0x0f, 0x38, 0xde - MODRM 0xc0 aesdec_opd1 aesdec_opd2 - .endm - - .macro AESDECLAST xmm1 xmm2 - XMM_NUM aesdeclast_opd1 \xmm1 - XMM_NUM aesdeclast_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aesdeclast_opd1 aesdeclast_opd2 - .byte 0x0f, 0x38, 0xdf - MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2 - .endm - - .macro MOVQ_R64_XMM opd1 opd2 - REG_TYPE movq_r64_xmm_opd1_type \opd1 - .if movq_r64_xmm_opd1_type == REG_TYPE_XMM - XMM_NUM movq_r64_xmm_opd1 \opd1 - R64_NUM movq_r64_xmm_opd2 \opd2 - .else - R64_NUM movq_r64_xmm_opd1 \opd1 - XMM_NUM movq_r64_xmm_opd2 \opd2 - .endif - PFX_OPD_SIZE - PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1 - .if movq_r64_xmm_opd1_type == REG_TYPE_XMM - .byte 0x0f, 0x7e - .else - .byte 0x0f, 0x6e - .endif - MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2 - .endm -#endif - -#endif From e72b48c5e7fe0c9fabeb23385b6e6f02f0a78d37 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:36 -0700 Subject: [PATCH 100/157] crypto: geniv - remove unneeded arguments from aead_geniv_alloc() The type and mask arguments to aead_geniv_alloc() are always 0, so remove them. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/echainiv.c | 2 +- crypto/geniv.c | 7 ++++--- crypto/seqiv.c | 2 +- include/crypto/internal/geniv.h | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/crypto/echainiv.c b/crypto/echainiv.c index 4a2f02baba14..69686668625e 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -115,7 +115,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl, struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); diff --git a/crypto/geniv.c b/crypto/geniv.c index 6a90c52d49ad..07496c8af0ab 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -39,7 +39,7 @@ static void aead_geniv_free(struct aead_instance *inst) } struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, u32 mask) + struct rtattr **tb) { struct crypto_aead_spawn *spawn; struct crypto_attr_type *algt; @@ -47,6 +47,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, struct aead_alg *alg; unsigned int ivsize; unsigned int maxauthsize; + u32 mask; int err; algt = crypto_get_attr_type(tb); @@ -63,10 +64,10 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); /* Ignore async algorithms if necessary. 
*/ - mask |= crypto_requires_sync(algt->type, algt->mask); + mask = crypto_requires_sync(algt->type, algt->mask); err = crypto_grab_aead(spawn, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), type, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; diff --git a/crypto/seqiv.c b/crypto/seqiv.c index f124b9b54e15..e48f875a7aac 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -138,7 +138,7 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 229d37681a9d..7fd7126f593a 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -20,7 +20,7 @@ struct aead_geniv_ctx { }; struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, u32 mask); + struct rtattr **tb); int aead_init_geniv(struct crypto_aead *tfm); void aead_exit_geniv(struct crypto_aead *tfm); From 4688111e78ed432b19c3cd6de545c83c47dc28b3 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:37 -0700 Subject: [PATCH 101/157] crypto: seqiv - remove seqiv_create() seqiv_create() is pointless because it just checks that the template is being instantiated as an AEAD, then calls seqiv_aead_create(). But seqiv_aead_create() does the exact same check, via aead_geniv_alloc(). Just remove seqiv_create() and use seqiv_aead_create() directly. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/seqiv.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/crypto/seqiv.c b/crypto/seqiv.c index e48f875a7aac..23e22d8b63e6 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -164,23 +164,9 @@ free_inst: return err; } -static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct crypto_attr_type *algt; - - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) - return -EINVAL; - - return seqiv_aead_create(tmpl, tb); -} - static struct crypto_template seqiv_tmpl = { .name = "seqiv", - .create = seqiv_create, + .create = seqiv_aead_create, .module = THIS_MODULE, }; From 7bcb2c99f8ed032cfb3f5596b4dccac6b1f501df Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:38 -0700 Subject: [PATCH 102/157] crypto: algapi - use common mechanism for inheriting flags The flag CRYPTO_ALG_ASYNC is "inherited" in the sense that when a template is instantiated, the template will have CRYPTO_ALG_ASYNC set if any of the algorithms it uses has CRYPTO_ALG_ASYNC set. We'd like to add a second flag (CRYPTO_ALG_ALLOCATES_MEMORY) that gets "inherited" in the same way. This is difficult because the handling of CRYPTO_ALG_ASYNC is hardcoded everywhere. Address this by: - Add CRYPTO_ALG_INHERITED_FLAGS, which contains the set of flags that have these inheritance semantics. - Add crypto_algt_inherited_mask(), for use by template ->create() methods. It returns any of these flags that the user asked to be unset and thus must be passed in the 'mask' to crypto_grab_*(). - Also modify crypto_check_attr_type() to handle computing the 'mask' so that most templates can just use this. - Make crypto_grab_*() propagate these flags to the template instance being created so that templates don't have to do this themselves. 
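[Editorial note, not part of the original commit message] Taken together,
the items above reduce a typical template ->create() method to a single
pattern. A hedged sketch follows: the helper names are the ones this
patch introduces, while the surrounding function and the spelled-out mask
computation in the comment are illustrative assumptions, not kernel code:

	static int example_create(struct crypto_template *tmpl,
				  struct rtattr **tb)
	{
		struct skcipher_instance *inst;
		u32 mask;
		int err;

		/* Check the type and compute the inherited-flags mask in one call. */
		err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
		if (err)
			return err;

		inst = kzalloc(sizeof(*inst) + sizeof(struct crypto_skcipher_spawn),
			       GFP_KERNEL);
		if (!inst)
			return -ENOMEM;

		/*
		 * 'mask' holds whichever CRYPTO_ALG_INHERITED_FLAGS the user asked
		 * to be clear (conceptually ~algt->type & algt->mask &
		 * CRYPTO_ALG_INHERITED_FLAGS). Passing it to crypto_grab_skcipher()
		 * keeps a disallowed inner algorithm from being selected, and
		 * crypto_grab_spawn() now copies the inner algorithm's inherited
		 * flags into inst->alg.cra_flags, so the manual
		 * "cra_flags = ... & CRYPTO_ALG_ASYNC" assignments disappear.
		 */
		err = crypto_grab_skcipher(skcipher_instance_ctx(inst),
					   skcipher_crypto_instance(inst),
					   crypto_attr_alg_name(tb[1]), 0, mask);
		if (err)
			kfree(inst);
		/* ... fill in inst->alg and register it on success ... */
		return err;
	}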
Make crypto/simd.c propagate these flags too, since it "wraps" another algorithm, similar to a template. Based on a patch by Mikulas Patocka (https://lore.kernel.org/r/alpine.LRH.2.02.2006301414580.30526@file01.intranet.prod.int.rdu2.redhat.com). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 14 ++-------- crypto/algapi.c | 21 +++++++++++++- crypto/authenc.c | 14 ++-------- crypto/authencesn.c | 14 ++-------- crypto/ccm.c | 33 +++++++--------------- crypto/chacha20poly1305.c | 14 ++-------- crypto/cmac.c | 5 ++-- crypto/cryptd.c | 59 ++++++++++++++++++++------------------- crypto/ctr.c | 19 ++++--------- crypto/cts.c | 13 ++------- crypto/essiv.c | 11 ++++++-- crypto/gcm.c | 40 ++++++-------------------- crypto/geniv.c | 14 ++-------- crypto/hmac.c | 5 ++-- crypto/lrw.c | 13 ++------- crypto/pcrypt.c | 14 ++++------ crypto/rsa-pkcs1pad.c | 13 ++------- crypto/simd.c | 6 ++-- crypto/skcipher.c | 15 ++++------ crypto/vmac.c | 5 ++-- crypto/xcbc.c | 5 ++-- crypto/xts.c | 17 ++++------- include/crypto/algapi.h | 23 ++++++++++----- 23 files changed, 153 insertions(+), 234 deletions(-) diff --git a/crypto/adiantum.c b/crypto/adiantum.c index cf2b9f4103dd..7fbdc3270984 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -490,7 +490,6 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg, static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; const char *nhpoly1305_name; struct skcipher_instance *inst; @@ -500,14 +499,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_alg *hash_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -565,8 +559,6 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags & - CRYPTO_ALG_ASYNC; inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx); inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask | diff --git a/crypto/algapi.c b/crypto/algapi.c index 92abdf675992..fdabf2675b63 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -690,6 +690,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, spawn->mask = mask; spawn->next = inst->spawns; inst->spawns = spawn; + inst->alg.cra_flags |= + (alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS); err = 0; } up_write(&crypto_alg_sem); @@ -816,7 +818,23 @@ struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) } EXPORT_SYMBOL_GPL(crypto_get_attr_type); -int crypto_check_attr_type(struct rtattr **tb, u32 type) +/** + * crypto_check_attr_type() - check algorithm type and compute inherited mask + * @tb: the template parameters + * @type: the algorithm type the template would be instantiated as + * @mask_ret: (output) the mask that should be passed to crypto_grab_*() + * to restrict the flags of any inner algorithms + * + * Validate that the algorithm type the user requested is compatible with the + * one the template would actually be instantiated 
as. E.g., if the user is + * doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because + * the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm. + * + * Also compute the mask to use to restrict the flags of any inner algorithms. + * + * Return: 0 on success; -errno on failure + */ +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret) { struct crypto_attr_type *algt; @@ -827,6 +845,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type) if ((algt->type ^ type) & algt->mask) return -EINVAL; + *mask_ret = crypto_algt_inherited_mask(algt); return 0; } EXPORT_SYMBOL_GPL(crypto_check_attr_type); diff --git a/crypto/authenc.c b/crypto/authenc.c index 775e7138fd10..670bf1a01d00 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -372,7 +372,6 @@ static void crypto_authenc_free(struct aead_instance *inst) static int crypto_authenc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_instance_ctx *ctx; @@ -381,14 +380,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -423,8 +417,6 @@ static int crypto_authenc_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 149b70df2a91..b60e61b1904c 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -390,7 +390,6 @@ static void crypto_authenc_esn_free(struct aead_instance *inst) static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_esn_instance_ctx *ctx; @@ -399,14 +398,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -437,8 +431,6 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/ccm.c b/crypto/ccm.c index d1fb01bbc814..494d70901186 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -447,7 +447,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *mac_name) { - struct crypto_attr_type *algt; 
u32 mask; struct aead_instance *inst; struct ccm_instance_ctx *ictx; @@ -455,14 +454,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, struct hash_alg_common *mac; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -470,7 +464,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ictx = aead_instance_ctx(inst); err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst), - mac_name, 0, CRYPTO_ALG_ASYNC); + mac_name, 0, mask | CRYPTO_ALG_ASYNC); if (err) goto err_free_inst; mac = crypto_spawn_ahash_alg(&ictx->mac); @@ -507,7 +501,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -712,21 +705,15 @@ static void crypto_rfc4309_free(struct aead_instance *inst) static int crypto_rfc4309_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -759,7 +746,6 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -878,9 +864,10 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -890,7 +877,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index ccaea5cb66d1..97bbb135e9a6 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -555,7 +555,6 @@ static void chachapoly_free(struct aead_instance *inst) static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, const char *name, unsigned int ivsize) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; @@ -566,14 +565,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, if (ivsize > CHACHAPOLY_IV_SIZE) return 
-EINVAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -613,8 +607,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (chacha->base.cra_flags | - poly->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (chacha->base.cra_priority + poly->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; diff --git a/crypto/cmac.c b/crypto/cmac.c index 143a6544c873..df36be1efb81 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -225,9 +225,10 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -237,7 +238,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 283212262adb..a1bea0f4baa8 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -191,17 +191,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) return ictx->queue; } -static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, - u32 *mask) +static void cryptd_type_and_mask(struct crypto_attr_type *algt, + u32 *type, u32 *mask) { - struct crypto_attr_type *algt; + /* + * cryptd is allowed to wrap internal algorithms, but in that case the + * resulting cryptd instance will be marked as internal as well. + */ + *type = algt->type & CRYPTO_ALG_INTERNAL; + *mask = algt->mask & CRYPTO_ALG_INTERNAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return; + /* No point in cryptd wrapping an algorithm that's already async. 
*/ + *mask |= CRYPTO_ALG_ASYNC; - *type |= algt->type & CRYPTO_ALG_INTERNAL; - *mask |= algt->mask & CRYPTO_ALG_INTERNAL; + *mask |= crypto_algt_inherited_mask(algt); } static int cryptd_init_instance(struct crypto_instance *inst, @@ -364,6 +367,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst) static int cryptd_create_skcipher(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct skcipherd_instance_ctx *ctx; @@ -373,10 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, u32 mask; int err; - type = 0; - mask = CRYPTO_ALG_ASYNC; - - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -395,9 +396,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); - + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); @@ -633,16 +633,17 @@ static void cryptd_hash_free(struct ahash_instance *inst) } static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct hashd_instance_ctx *ctx; struct ahash_instance *inst; struct shash_alg *alg; - u32 type = 0; - u32 mask = 0; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -661,10 +662,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL | + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL| CRYPTO_ALG_OPTIONAL_KEY)); - inst->alg.halg.digestsize = alg->digestsize; inst->alg.halg.statesize = alg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); @@ -820,16 +820,17 @@ static void cryptd_aead_free(struct aead_instance *inst) static int cryptd_create_aead(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct aead_instance_ctx *ctx; struct aead_instance *inst; struct aead_alg *alg; - u32 type = 0; - u32 mask = CRYPTO_ALG_ASYNC; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -848,8 +849,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx); inst->alg.ivsize = crypto_aead_alg_ivsize(alg); @@ -884,11 +885,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_SKCIPHER: - return cryptd_create_skcipher(tmpl, tb, &queue); + return cryptd_create_skcipher(tmpl, tb, 
algt, &queue); case CRYPTO_ALG_TYPE_HASH: - return cryptd_create_hash(tmpl, tb, &queue); + return cryptd_create_hash(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_AEAD: - return cryptd_create_aead(tmpl, tb, &queue); + return cryptd_create_aead(tmpl, tb, algt, &queue); } return -EINVAL; diff --git a/crypto/ctr.c b/crypto/ctr.c index 31ac4ae598e1..ae8d88c715d6 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -256,29 +256,22 @@ static void crypto_rfc3686_free(struct skcipher_instance *inst) static int crypto_rfc3686_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; struct skcipher_instance *inst; struct skcipher_alg *alg; struct crypto_skcipher_spawn *spawn; u32 mask; - int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; + mask |= crypto_requires_off(crypto_get_attr_type(tb), + CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; - mask = crypto_requires_sync(algt->type, algt->mask) | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); - spawn = skcipher_instance_ctx(inst); err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), @@ -310,8 +303,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.ivsize = CTR_RFC3686_IV_SIZE; inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + diff --git a/crypto/cts.c b/crypto/cts.c index 5e005c4f0221..3766d47ebcc0 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -325,19 +325,13 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -364,7 +358,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/essiv.c b/crypto/essiv.c index a7f45dbc4ee2..d012be23d496 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -466,7 +466,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(shash_name); type = algt->type & algt->mask; - mask = crypto_requires_sync(algt->type, algt->mask); + mask = crypto_algt_inherited_mask(algt); switch (type) { case CRYPTO_ALG_TYPE_SKCIPHER: @@ -525,7 +525,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) /* Synchronous hash, e.g., "sha256" */ _hash_alg = crypto_alg_mod_lookup(shash_name, CRYPTO_ALG_TYPE_SHASH, - 
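/*
 * [Editorial note, not part of the patch] essiv, converted in the hunks
 * around this point, is the one template that cannot lean on
 * crypto_grab_*(): it finds its hash with crypto_alg_mod_lookup(), which
 * propagates nothing. Hence the two-part fix: the lookup mask gains
 * "| mask" so a disallowed inner hash is never selected, and the
 * inherited flags are then ORed into the instance by hand,
 *
 *	base->cra_flags |= (hash_alg->base.cra_flags &
 *			    CRYPTO_ALG_INHERITED_FLAGS);
 *
 * as the following hunk shows.
 */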
CRYPTO_ALG_TYPE_MASK); + CRYPTO_ALG_TYPE_MASK | mask); if (IS_ERR(_hash_alg)) { err = PTR_ERR(_hash_alg); goto out_drop_skcipher; @@ -557,7 +557,12 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_free_hash; - base->cra_flags = block_base->cra_flags & CRYPTO_ALG_ASYNC; + /* + * hash_alg wasn't gotten via crypto_grab*(), so we need to inherit its + * flags manually. + */ + base->cra_flags |= (hash_alg->base.cra_flags & + CRYPTO_ALG_INHERITED_FLAGS); base->cra_blocksize = block_base->cra_blocksize; base->cra_ctxsize = sizeof(struct essiv_tfm_ctx); base->cra_alignmask = block_base->cra_alignmask; diff --git a/crypto/gcm.c b/crypto/gcm.c index 0103d28c541e..3a36a9533c96 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -578,7 +578,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *ghash_name) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; @@ -586,14 +585,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, struct hash_alg_common *ghash; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -635,8 +629,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (ghash->base.cra_flags | - ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -835,21 +827,15 @@ static void crypto_rfc4106_free(struct aead_instance *inst) static int crypto_rfc4106_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -882,7 +868,6 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -1057,21 +1042,15 @@ static void crypto_rfc4543_free(struct aead_instance *inst) static int crypto_rfc4543_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct aead_alg *alg; struct crypto_rfc4543_instance_ctx *ctx; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + 
sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -1104,7 +1083,6 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/geniv.c b/crypto/geniv.c index 07496c8af0ab..bee4621b4f12 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -42,7 +42,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_aead_spawn *spawn; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; unsigned int ivsize; @@ -50,12 +49,9 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return ERR_PTR(-EINVAL); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return ERR_PTR(err); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -63,9 +59,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); - /* Ignore async algorithms if necessary. */ - mask = crypto_requires_sync(algt->type, algt->mask); - err = crypto_grab_aead(spawn, aead_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) @@ -90,7 +83,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/hmac.c b/crypto/hmac.c index e38bfb948278..25856aa7ccbf 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -168,11 +168,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; + u32 mask; int err; int ds; int ss; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -182,7 +183,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_shash(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; salg = crypto_spawn_shash_alg(spawn); diff --git a/crypto/lrw.c b/crypto/lrw.c index 5b07a7c09296..a709c801ee45 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -297,21 +297,15 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -379,7 +373,6 @@ static int create(struct 
crypto_template *tmpl, struct rtattr **tb) } else goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 8bddc65cd509..cbc383a1a3fe 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -226,18 +226,14 @@ static int pcrypt_init_instance(struct crypto_instance *inst, } static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, - u32 type, u32 mask) + struct crypto_attr_type *algt) { struct pcrypt_instance_ctx *ctx; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; + u32 mask = crypto_algt_inherited_mask(algt); int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; @@ -254,7 +250,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, goto err_free_inst; err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; @@ -263,7 +259,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC; + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC; inst->alg.ivsize = crypto_aead_alg_ivsize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); @@ -298,7 +294,7 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: - return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask); + return pcrypt_create_aead(tmpl, tb, algt); } return -EINVAL; diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index d31031de51bc..4983b2b4a223 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -596,7 +596,6 @@ static void pkcs1pad_free(struct akcipher_instance *inst) static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct akcipher_instance *inst; struct pkcs1pad_inst_ctx *ctx; @@ -604,14 +603,9 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) const char *hash_name; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -658,7 +652,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) goto err_free_inst; } - inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = rsa_alg->base.cra_priority; inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); diff --git a/crypto/simd.c b/crypto/simd.c index 56885af49c24..edaa479a1ec5 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -171,7 +171,8 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, drvname) >= CRYPTO_MAX_ALG_NAME) goto out_free_salg; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | + (ialg->base.cra_flags & 
CRYPTO_ALG_INHERITED_FLAGS); alg->base.cra_priority = ialg->base.cra_priority; alg->base.cra_blocksize = ialg->base.cra_blocksize; alg->base.cra_alignmask = ialg->base.cra_alignmask; @@ -417,7 +418,8 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname, drvname) >= CRYPTO_MAX_ALG_NAME) goto out_free_salg; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | + (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS); alg->base.cra_priority = ialg->base.cra_priority; alg->base.cra_blocksize = ialg->base.cra_blocksize; alg->base.cra_alignmask = ialg->base.cra_alignmask; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 7221def7b9a7..3b93a74ad124 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -934,22 +934,17 @@ static void skcipher_free_instance_simple(struct skcipher_instance *inst) struct skcipher_instance *skcipher_alloc_instance_simple( struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct skcipher_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *cipher_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return ERR_PTR(-EINVAL); - - mask = crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return ERR_PTR(err); + mask |= crypto_requires_off(crypto_get_attr_type(tb), + CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/vmac.c b/crypto/vmac.c index 2d906830df96..9b565d1040d6 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -620,9 +620,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -632,7 +633,7 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 598ec88abf0f..af3b7eb5d7c7 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -191,9 +191,10 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -203,7 +204,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/xts.c b/crypto/xts.c index 3565f3b863a6..35a30610569b 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -331,19 +331,17 @@ static void crypto_xts_free(struct skcipher_instance *inst) static int create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct 
crypto_attr_type *algt; struct xts_instance_ctx *ctx; struct skcipher_alg *alg; const char *cipher_name; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; + mask |= crypto_requires_off(crypto_get_attr_type(tb), + CRYPTO_ALG_NEED_FALLBACK); cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -355,10 +353,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ctx = skcipher_instance_ctx(inst); - mask = crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK | - CRYPTO_ALG_ASYNC); - err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); if (err == -ENOENT) { @@ -415,7 +409,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) } else goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 00a9cf98debe..da64c37482b4 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -116,7 +116,7 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, void *crypto_spawn_tfm2(struct crypto_spawn *spawn); struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); -int crypto_check_attr_type(struct rtattr **tb, u32 type); +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret); const char *crypto_attr_alg_name(struct rtattr *rta); int crypto_attr_u32(struct rtattr *rta, u32 *num); int crypto_inst_setname(struct crypto_instance *inst, const char *name, @@ -235,18 +235,27 @@ static inline struct crypto_async_request *crypto_get_backlog( container_of(queue->backlog, struct crypto_async_request, list); } -static inline int crypto_requires_off(u32 type, u32 mask, u32 off) +static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off) { - return (type ^ off) & mask & off; + return (algt->type ^ off) & algt->mask & off; } /* - * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. - * Otherwise returns zero. + * When an algorithm uses another algorithm (e.g., if it's an instance of a + * template), these are the flags that should always be set on the "outer" + * algorithm if any "inner" algorithm has them set. */ -static inline int crypto_requires_sync(u32 type, u32 mask) +#define CRYPTO_ALG_INHERITED_FLAGS CRYPTO_ALG_ASYNC + +/* + * Given the type and mask that specify the flags restrictions on a template + * instance being created, return the mask that should be passed to + * crypto_grab_*() (along with type=0) to honor any request the user made to + * have any of the CRYPTO_ALG_INHERITED_FLAGS clear. 
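+ *
+ * For example, if the user asked for an instance with CRYPTO_ALG_ASYNC
+ * clear (i.e. a synchronous instance), the returned mask includes
+ * CRYPTO_ALG_ASYNC; passing it to crypto_grab_*() with type=0 then makes
+ * the lookup bind only inner algorithms that are themselves synchronous.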
+ */ +static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt) { - return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC); + return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS); } noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); From 2eb27c11937ee9984c04b75d213a737291c5f58c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:39 -0700 Subject: [PATCH 103/157] crypto: algapi - add NEED_FALLBACK to INHERITED_FLAGS CRYPTO_ALG_NEED_FALLBACK is handled inconsistently. When it's requested to be clear, some templates propagate that request to child algorithms, while others don't. It's apparently desired for NEED_FALLBACK to be propagated, to avoid deadlocks where a module tries to load itself while it's being initialized, and to avoid unnecessarily complex fallback chains where we have e.g. cbc-aes-$driver falling back to cbc(aes-$driver) where aes-$driver itself falls back to aes-generic, instead of cbc-aes-$driver simply falling back to cbc(aes-generic). There have been a number of fixes to this effect: commit 89027579bc6c ("crypto: xts - Propagate NEED_FALLBACK bit") commit d2c2a85cfe82 ("crypto: ctr - Propagate NEED_FALLBACK bit") commit e6c2e65c70a6 ("crypto: cbc - Propagate NEED_FALLBACK bit") But it seems that other templates can have the same problems too. To avoid this whack-a-mole, just add NEED_FALLBACK to INHERITED_FLAGS so that it's always inherited. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ctr.c | 2 -- crypto/skcipher.c | 2 -- crypto/xts.c | 2 -- include/crypto/algapi.h | 3 ++- include/linux/crypto.h | 4 ++-- 5 files changed, 4 insertions(+), 9 deletions(-) diff --git a/crypto/ctr.c b/crypto/ctr.c index ae8d88c715d6..c39fcffba27f 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -265,8 +265,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); if (err) return err; - mask |= crypto_requires_off(crypto_get_attr_type(tb), - CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 3b93a74ad124..467af525848a 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -943,8 +943,6 @@ struct skcipher_instance *skcipher_alloc_instance_simple( err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); if (err) return ERR_PTR(err); - mask |= crypto_requires_off(crypto_get_attr_type(tb), - CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/xts.c b/crypto/xts.c index 35a30610569b..9a7adab6c3e1 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -340,8 +340,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); if (err) return err; - mask |= crypto_requires_off(crypto_get_attr_type(tb), - CRYPTO_ALG_NEED_FALLBACK); cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index da64c37482b4..22cf4d80959f 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -245,7 +245,8 @@ static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off) * template), these are the flags that should always be set on the "outer" * algorithm if any "inner" algorithm has them set. 
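 *
 * E.g. an instance of "cbc(aes)" must be marked CRYPTO_ALG_ASYNC whenever
 * the underlying "aes" implementation is asynchronous.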
*/ -#define CRYPTO_ALG_INHERITED_FLAGS CRYPTO_ALG_ASYNC +#define CRYPTO_ALG_INHERITED_FLAGS \ + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK) /* * Given the type and mask that specify the flags restrictions on a template diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 7cd2d00f0a05..f73f0b51e1cd 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -60,8 +60,8 @@ #define CRYPTO_ALG_ASYNC 0x00000080 /* - * Set this bit if and only if the algorithm requires another algorithm of - * the same type to handle corner cases. + * Set if the algorithm (or an algorithm which it uses) requires another + * algorithm of the same type to handle corner cases. */ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 From fbb6cda44190d72aa5199d728797aabc6d2ed816 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:40 -0700 Subject: [PATCH 104/157] crypto: algapi - introduce the flag CRYPTO_ALG_ALLOCATES_MEMORY Introduce a new algorithm flag CRYPTO_ALG_ALLOCATES_MEMORY. If this flag is set, then the driver allocates memory in its request routine. Such drivers are not suitable for disk encryption because GFP_ATOMIC allocation can fail anytime (causing random I/O errors) and GFP_KERNEL allocation can recurse into the block layer, causing a deadlock. For now, this flag is only implemented for some algorithm types. We also assume some usage constraints for it to be meaningful, since there are lots of edge cases the crypto API allows (e.g., misaligned or fragmented scatterlists) that mean that nearly any crypto algorithm can allocate memory in some case. See the comment for details. Also add this flag to CRYPTO_ALG_INHERITED_FLAGS so that when a template is instantiated, this flag is set on the template instance if it is set on any algorithm the instance uses. Based on a patch by Mikulas Patocka (https://lore.kernel.org/r/alpine.LRH.2.02.2006301414580.30526@file01.intranet.prod.int.rdu2.redhat.com). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/algapi.h | 3 ++- include/linux/crypto.h | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 22cf4d80959f..143d884d65c7 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -246,7 +246,8 @@ static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off) * algorithm if any "inner" algorithm has them set. */ #define CRYPTO_ALG_INHERITED_FLAGS \ - (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK) + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | \ + CRYPTO_ALG_ALLOCATES_MEMORY) /* * Given the type and mask that specify the flags restrictions on a template diff --git a/include/linux/crypto.h b/include/linux/crypto.h index f73f0b51e1cd..ef90e07c9635 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -100,6 +100,38 @@ */ #define CRYPTO_NOLOAD 0x00008000 +/* + * The algorithm may allocate memory during request processing, i.e. during + * encryption, decryption, or hashing. Users can request an algorithm with this + * flag unset if they can't handle memory allocation failures. + * + * This flag is currently only implemented for algorithms of type "skcipher", + * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not + * have this flag set even if they allocate memory. + * + * In some edge cases, algorithms can allocate memory regardless of this flag. 
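+ * For example, misaligned or heavily fragmented scatterlists can force
+ * nearly any algorithm to allocate.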
+ * To avoid these cases, users must obey the following usage constraints: + * skcipher: + * - The IV buffer and all scatterlist elements must be aligned to the + * algorithm's alignmask. + * - If the data were to be divided into chunks of size + * crypto_skcipher_walksize() (with any remainder going at the end), no + * chunk can cross a page boundary or a scatterlist element boundary. + * aead: + * - The IV buffer and all scatterlist elements must be aligned to the + * algorithm's alignmask. + * - The first scatterlist element must contain all the associated data, + * and its pages must be !PageHighMem. + * - If the plaintext/ciphertext were to be divided into chunks of size + * crypto_aead_walksize() (with the remainder going at the end), no chunk + * can cross a page boundary or a scatterlist element boundary. + * ahash: + * - The result buffer must be aligned to the algorithm's alignmask. + * - crypto_ahash_finup() must not be used unless the algorithm implements + * ->finup() natively. + */ +#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000 + /* * Transform masks and values (for crt_flags). */ From b8aa7dc5c7535f9abfca4bceb0ade9ee10cf5f54 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Thu, 9 Jul 2020 23:20:41 -0700 Subject: [PATCH 105/157] crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY Set the flag CRYPTO_ALG_ALLOCATES_MEMORY in the crypto drivers that allocate memory. drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c: sun8i_ce_cipher drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c: sun8i_ss_cipher drivers/crypto/amlogic/amlogic-gxl-core.c: meson_cipher drivers/crypto/axis/artpec6_crypto.c: artpec6_crypto_common_init drivers/crypto/bcm/cipher.c: spu_skcipher_rx_sg_create drivers/crypto/caam/caamalg.c: aead_edesc_alloc drivers/crypto/caam/caamalg_qi.c: aead_edesc_alloc drivers/crypto/caam/caamalg_qi2.c: aead_edesc_alloc drivers/crypto/caam/caamhash.c: hash_digest_key drivers/crypto/cavium/cpt/cptvf_algs.c: process_request drivers/crypto/cavium/nitrox/nitrox_aead.c: nitrox_process_se_request drivers/crypto/cavium/nitrox/nitrox_skcipher.c: nitrox_process_se_request drivers/crypto/ccp/ccp-crypto-aes-cmac.c: ccp_do_cmac_update drivers/crypto/ccp/ccp-crypto-aes-galois.c: ccp_crypto_enqueue_request drivers/crypto/ccp/ccp-crypto-aes-xts.c: ccp_crypto_enqueue_request drivers/crypto/ccp/ccp-crypto-aes.c: ccp_crypto_enqueue_request drivers/crypto/ccp/ccp-crypto-des3.c: ccp_crypto_enqueue_request drivers/crypto/ccp/ccp-crypto-sha.c: ccp_crypto_enqueue_request drivers/crypto/chelsio/chcr_algo.c: create_cipher_wr drivers/crypto/hisilicon/sec/sec_algs.c: sec_alloc_and_fill_hw_sgl drivers/crypto/hisilicon/sec2/sec_crypto.c: sec_alloc_req_id drivers/crypto/inside-secure/safexcel_cipher.c: safexcel_queue_req drivers/crypto/inside-secure/safexcel_hash.c: safexcel_ahash_enqueue drivers/crypto/ixp4xx_crypto.c: ablk_perform drivers/crypto/marvell/cesa/cipher.c: mv_cesa_skcipher_dma_req_init drivers/crypto/marvell/cesa/hash.c: mv_cesa_ahash_dma_req_init drivers/crypto/marvell/octeontx/otx_cptvf_algs.c: create_ctx_hdr drivers/crypto/n2_core.c: n2_compute_chunks drivers/crypto/picoxcell_crypto.c: spacc_sg_to_ddt drivers/crypto/qat/qat_common/qat_algs.c: qat_alg_skcipher_encrypt drivers/crypto/qce/skcipher.c: qce_skcipher_async_req_handle drivers/crypto/talitos.c : talitos_edesc_alloc drivers/crypto/virtio/virtio_crypto_algs.c: __virtio_crypto_skcipher_do_req drivers/crypto/xilinx/zynqmp-aes-gcm.c: zynqmp_aes_aead_cipher Signed-off-by: Mikulas Patocka [EB: avoid overly-long lines] 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- .../crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 12 +- .../crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 12 +- drivers/crypto/amlogic/amlogic-gxl-core.c | 6 +- drivers/crypto/axis/artpec6_crypto.c | 20 ++- drivers/crypto/bcm/cipher.c | 72 ++++++++--- drivers/crypto/caam/caamalg.c | 6 +- drivers/crypto/caam/caamalg_qi.c | 6 +- drivers/crypto/caam/caamalg_qi2.c | 8 +- drivers/crypto/caam/caamhash.c | 2 +- drivers/crypto/cavium/cpt/cptvf_algs.c | 18 ++- drivers/crypto/cavium/nitrox/nitrox_aead.c | 4 +- .../crypto/cavium/nitrox/nitrox_skcipher.c | 16 +-- drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 1 + drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1 + drivers/crypto/ccp/ccp-crypto-aes-xts.c | 1 + drivers/crypto/ccp/ccp-crypto-aes.c | 2 + drivers/crypto/ccp/ccp-crypto-des3.c | 1 + drivers/crypto/ccp/ccp-crypto-sha.c | 1 + drivers/crypto/chelsio/chcr_algo.c | 7 +- drivers/crypto/hisilicon/sec/sec_algs.c | 24 ++-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 4 +- .../crypto/inside-secure/safexcel_cipher.c | 47 +++++++ drivers/crypto/inside-secure/safexcel_hash.c | 18 +++ drivers/crypto/ixp4xx_crypto.c | 6 +- drivers/crypto/marvell/cesa/cipher.c | 18 ++- drivers/crypto/marvell/cesa/hash.c | 6 + .../crypto/marvell/octeontx/otx_cptvf_algs.c | 30 ++--- drivers/crypto/n2_core.c | 3 +- drivers/crypto/picoxcell_crypto.c | 17 ++- drivers/crypto/qat/qat_common/qat_algs.c | 13 +- drivers/crypto/qce/skcipher.c | 1 + drivers/crypto/talitos.c | 117 ++++++++++++------ drivers/crypto/virtio/virtio_crypto_algs.c | 3 +- drivers/crypto/xilinx/zynqmp-aes-gcm.c | 1 + 34 files changed, 361 insertions(+), 143 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index b957061424a1..138759dc8190 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -185,7 +185,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -211,7 +212,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -236,7 +238,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -262,7 +265,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, diff --git 
a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 5d9d0fedcb06..9a23515783a6 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -169,7 +169,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -195,7 +196,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -220,7 +222,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -246,7 +249,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 411857fad8ba..466552acbbbb 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -54,7 +54,8 @@ static struct meson_alg_template mc_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -79,7 +80,8 @@ static struct meson_alg_template mc_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 62ba0325a618..1a46eeddf082 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2630,7 +2630,8 @@ static struct ahash_alg hash_algos[] = { .cra_name = "sha1", .cra_driver_name = "artpec-sha1", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, @@ -2653,7 +2654,8 @@ static struct ahash_alg hash_algos[] = { .cra_name = "sha256", .cra_driver_name = "artpec-sha256", .cra_priority = 300, - .cra_flags = 
CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, @@ -2677,7 +2679,8 @@ static struct ahash_alg hash_algos[] = { .cra_name = "hmac(sha256)", .cra_driver_name = "artpec-hmac-sha256", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, @@ -2696,7 +2699,8 @@ static struct skcipher_alg crypto_algos[] = { .cra_name = "ecb(aes)", .cra_driver_name = "artpec6-ecb-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, @@ -2717,6 +2721,7 @@ static struct skcipher_alg crypto_algos[] = { .cra_driver_name = "artpec6-ctr-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), @@ -2738,7 +2743,8 @@ static struct skcipher_alg crypto_algos[] = { .cra_name = "cbc(aes)", .cra_driver_name = "artpec6-cbc-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, @@ -2759,7 +2765,8 @@ static struct skcipher_alg crypto_algos[] = { .cra_name = "xts(aes)", .cra_driver_name = "artpec6-xts-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, @@ -2790,6 +2797,7 @@ static struct aead_alg aead_algos[] = { .cra_driver_name = "artpec-gcm-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index a353217a0d33..8a7fa1ae1ade 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -3233,7 +3233,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(md5),cbc(aes))", .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, @@ -3256,7 +3258,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, @@ -3279,7 +3283,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, 
.setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, @@ -3302,7 +3308,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(md5),cbc(des))", .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3325,7 +3333,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha1),cbc(des))", .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3348,7 +3358,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha224),cbc(des))", .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3371,7 +3383,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha256),cbc(des))", .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3394,7 +3408,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha384),cbc(des))", .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3417,7 +3433,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha512),cbc(des))", .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3440,7 +3458,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3463,7 +3483,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3486,7 +3508,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc", 
.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3509,7 +3533,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3532,7 +3558,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3555,7 +3583,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3811,7 +3841,8 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "md5", .cra_driver_name = "md5-iproc", .cra_blocksize = MD5_BLOCK_WORDS * 4, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .cipher_info = { @@ -4508,7 +4539,9 @@ static int spu_register_skcipher(struct iproc_alg_s *driver_alg) crypto->base.cra_priority = cipher_pri; crypto->base.cra_alignmask = 0; crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s); - crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + crypto->base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; crypto->init = skcipher_init_tfm; crypto->exit = skcipher_exit_tfm; @@ -4547,7 +4580,8 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg) hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s); hash->halg.base.cra_init = ahash_cra_init; hash->halg.base.cra_exit = generic_cra_exit; - hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; hash->halg.statesize = sizeof(struct spu_hash_export_s); if (driver_alg->auth_info.mode != HASH_MODE_HMAC) { @@ -4591,7 +4625,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg) aead->base.cra_alignmask = 0; aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s); - aead->base.cra_flags |= CRYPTO_ALG_ASYNC; + aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; /* setkey set in alg initialization */ aead->setauthsize = aead_setauthsize; aead->encrypt = aead_encrypt; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 797bff9b9318..e94e7f27f1d0 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -3412,7 +3412,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - 
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init; alg->exit = caam_cra_exit; @@ -3425,7 +3426,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_aead_init; alg->exit = caam_aead_exit; diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 27e36bdf6163..efe8f15a4a51 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -2502,7 +2502,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init; alg->exit = caam_cra_exit; @@ -2515,7 +2516,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_aead_init; alg->exit = caam_aead_exit; diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 45e9ff851e2d..1b0c28675906 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -2912,7 +2912,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init_skcipher; alg->exit = caam_cra_exit; @@ -2925,7 +2926,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init_aead; alg->exit = caam_cra_exit_aead; @@ -4547,7 +4549,7 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev, alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; - alg->cra_flags = CRYPTO_ALG_ASYNC; + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; t_alg->alg_type = template->alg_type; t_alg->dev = dev; diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 27ff4a3d037e..e8a6d8bc43b5 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1927,7 +1927,7 @@ caam_hash_alloc(struct caam_hash_template *template, alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = 
template->blocksize; alg->cra_alignmask = 0; - alg->cra_flags = CRYPTO_ALG_ASYNC; + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; t_alg->alg_type = template->alg_type; diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 0f0991f9f294..5af0dc2a8909 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -341,7 +341,8 @@ static int cvm_enc_dec_init(struct crypto_skcipher *tfm) } static struct skcipher_alg algs[] = { { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -358,7 +359,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -375,7 +377,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -391,7 +394,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -408,7 +412,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_des3_ctx), .base.cra_alignmask = 7, @@ -425,7 +430,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_des3_ctx), .base.cra_alignmask = 7, diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c index dce5423a5883..1be2571363fe 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_aead.c +++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c @@ -522,7 +522,7 @@ static struct aead_alg nitrox_aeads[] = { { .cra_name = "gcm(aes)", .cra_driver_name = "n5_aes_gcm", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -541,7 +541,7 @@ static struct aead_alg nitrox_aeads[] = { { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "n5_rfc4106", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c index 18088b0a2257..a553ac65f324 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c +++ 
b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c @@ -388,7 +388,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cbc(aes)", .cra_driver_name = "n5_cbc(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -407,7 +407,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "ecb(aes)", .cra_driver_name = "n5_ecb(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -426,7 +426,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cfb(aes)", .cra_driver_name = "n5_cfb(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -445,7 +445,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "xts(aes)", .cra_driver_name = "n5_xts(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -464,7 +464,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "n5_rfc3686(ctr(aes))", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -483,7 +483,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cts(cbc(aes))", .cra_driver_name = "n5_cts(cbc(aes))", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -502,7 +502,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cbc(des3_ede)", .cra_driver_name = "n5_cbc(des3_ede)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -521,7 +521,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "ecb(des3_ede)", .cra_driver_name = "n5_ecb(des3_ede)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index 5eba7ee49e81..11a305fa19e6 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -378,6 +378,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head) snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)"); snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp"); base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; base->cra_blocksize = AES_BLOCK_SIZE; diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c 
b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index 9e8f07c1afac..1c1c939f5c39 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -172,6 +172,7 @@ static struct aead_alg ccp_aes_gcm_defaults = { .maxauthsize = AES_BLOCK_SIZE, .base = { .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 959168a7ac59..6849261ca47d 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -242,6 +242,7 @@ static int ccp_register_aes_xts_alg(struct list_head *head, snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->drv_name); alg->base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; alg->base.cra_blocksize = AES_BLOCK_SIZE; diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index 51e12fbd1159..e6dcd8cedd53 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -212,6 +212,7 @@ static const struct skcipher_alg ccp_aes_defaults = { .init = ccp_aes_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, @@ -229,6 +230,7 @@ static const struct skcipher_alg ccp_aes_rfc3686_defaults = { .init = ccp_aes_rfc3686_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE, diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index 9c129defdb50..ec97daf0fcb7 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -136,6 +136,7 @@ static const struct skcipher_alg ccp_des3_defaults = { .init = ccp_des3_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 2bc29736fa45..8fbfdb9e8cd3 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -487,6 +487,7 @@ static int ccp_register_sha_alg(struct list_head *head, snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->drv_name); base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; base->cra_blocksize = def->block_size; diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index a6625b90fb1a..9b68d62149ad 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -4434,6 +4434,7 @@ static int chcr_register_alg(void) driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE; driver_algs[i].alg.skcipher.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK; driver_algs[i].alg.skcipher.base.cra_ctxsize = sizeof(struct chcr_context) + @@ -4445,7 +4446,8 @@ static int chcr_register_alg(void) break; case CRYPTO_ALG_TYPE_AEAD: driver_algs[i].alg.aead.base.cra_flags = - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | + 
CRYPTO_ALG_ALLOCATES_MEMORY; driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; driver_algs[i].alg.aead.init = chcr_aead_cra_init; @@ -4465,7 +4467,8 @@ static int chcr_register_alg(void) a_hash->halg.statesize = SZ_AHASH_REQ_CTX; a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; a_hash->halg.base.cra_module = THIS_MODULE; - a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + a_hash->halg.base.cra_flags = + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; a_hash->halg.base.cra_alignmask = 0; a_hash->halg.base.cra_exit = NULL; diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index 4ad4ffd90cee..8ca945ac297e 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -934,7 +934,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ecb(aes)", .cra_driver_name = "hisi_sec_aes_ecb", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -953,7 +954,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "cbc(aes)", .cra_driver_name = "hisi_sec_aes_cbc", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -972,7 +974,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ctr(aes)", .cra_driver_name = "hisi_sec_aes_ctr", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -991,7 +994,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "xts(aes)", .cra_driver_name = "hisi_sec_aes_xts", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -1011,7 +1015,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ecb(des)", .cra_driver_name = "hisi_sec_des_ecb", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -1030,7 +1035,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "cbc(des)", .cra_driver_name = "hisi_sec_des_cbc", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -1049,7 +1055,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "hisi_sec_3des_cbc", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -1068,7 +1075,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "hisi_sec_3des_ecb", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = 
sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index bfb9ce1359f3..497969ae8b23 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -1475,7 +1475,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ - .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ @@ -1599,7 +1599,7 @@ static int sec_aead_decrypt(struct aead_request *a_req) .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ - .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 0c5e80c3f6e3..1ac3253b7903 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1300,6 +1300,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = { .cra_driver_name = "safexcel-ecb-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1337,6 +1338,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = { .cra_driver_name = "safexcel-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1374,6 +1376,7 @@ struct safexcel_alg_template safexcel_alg_cfb_aes = { .cra_driver_name = "safexcel-cfb-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1411,6 +1414,7 @@ struct safexcel_alg_template safexcel_alg_ofb_aes = { .cra_driver_name = "safexcel-ofb-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1485,6 +1489,7 @@ struct safexcel_alg_template safexcel_alg_ctr_aes = { .cra_driver_name = "safexcel-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1545,6 +1550,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des = { .cra_driver_name = "safexcel-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1582,6 +1588,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des = { .cra_driver_name = "safexcel-ecb-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = 
sizeof(struct safexcel_cipher_ctx), @@ -1642,6 +1649,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .cra_driver_name = "safexcel-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1679,6 +1687,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .cra_driver_name = "safexcel-ecb-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1751,6 +1760,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1786,6 +1796,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1821,6 +1832,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1856,6 +1868,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1891,6 +1904,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1927,6 +1941,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1963,6 +1978,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1999,6 +2015,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, 
.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2035,6 +2052,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2071,6 +2089,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2107,6 +2126,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2143,6 +2163,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2179,6 +2200,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2215,6 +2237,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2251,6 +2274,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2285,6 +2309,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2319,6 +2344,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2353,6 +2379,7 @@ struct 
safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2387,6 +2414,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2421,6 +2449,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2534,6 +2563,7 @@ struct safexcel_alg_template safexcel_alg_xts_aes = { .cra_driver_name = "safexcel-xts-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = XTS_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2646,6 +2676,7 @@ struct safexcel_alg_template safexcel_alg_gcm = { .cra_driver_name = "safexcel-gcm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2769,6 +2800,7 @@ struct safexcel_alg_template safexcel_alg_ccm = { .cra_driver_name = "safexcel-ccm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2832,6 +2864,7 @@ struct safexcel_alg_template safexcel_alg_chacha20 = { .cra_driver_name = "safexcel-chacha20", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2993,6 +3026,7 @@ struct safexcel_alg_template safexcel_alg_chachapoly = { /* +1 to put it above HW chacha + SW poly */ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, @@ -3032,6 +3066,7 @@ struct safexcel_alg_template safexcel_alg_chachapoly_esp = { /* +1 to put it above HW chacha + SW poly */ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, @@ -3110,6 +3145,7 @@ struct safexcel_alg_template safexcel_alg_ecb_sm4 = { .cra_driver_name = "safexcel-ecb-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3147,6 +3183,7 @@ struct safexcel_alg_template safexcel_alg_cbc_sm4 = { .cra_driver_name = "safexcel-cbc-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM4_BLOCK_SIZE, 
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3184,6 +3221,7 @@ struct safexcel_alg_template safexcel_alg_ofb_sm4 = { .cra_driver_name = "safexcel-ofb-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3221,6 +3259,7 @@ struct safexcel_alg_template safexcel_alg_cfb_sm4 = { .cra_driver_name = "safexcel-cfb-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3273,6 +3312,7 @@ struct safexcel_alg_template safexcel_alg_ctr_sm4 = { .cra_driver_name = "safexcel-ctr-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3332,6 +3372,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3441,6 +3482,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SM4_BLOCK_SIZE, @@ -3476,6 +3518,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3510,6 +3553,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3578,6 +3622,7 @@ struct safexcel_alg_template safexcel_alg_rfc4106_gcm = { .cra_driver_name = "safexcel-rfc4106-gcm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3622,6 +3667,7 @@ struct safexcel_alg_template safexcel_alg_rfc4543_gcm = { .cra_driver_name = "safexcel-rfc4543-gcm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3713,6 +3759,7 @@ struct safexcel_alg_template safexcel_alg_rfc4309_ccm = { .cra_driver_name = "safexcel-rfc4309-ccm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 43962bc709c6..16a467969d8e 100644 
--- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -992,6 +992,7 @@ struct safexcel_alg_template safexcel_alg_sha1 = { .cra_driver_name = "safexcel-sha1", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1235,6 +1236,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = { .cra_driver_name = "safexcel-hmac-sha1", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1291,6 +1293,7 @@ struct safexcel_alg_template safexcel_alg_sha256 = { .cra_driver_name = "safexcel-sha256", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1347,6 +1350,7 @@ struct safexcel_alg_template safexcel_alg_sha224 = { .cra_driver_name = "safexcel-sha224", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1418,6 +1422,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha224 = { .cra_driver_name = "safexcel-hmac-sha224", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1489,6 +1494,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = { .cra_driver_name = "safexcel-hmac-sha256", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1545,6 +1551,7 @@ struct safexcel_alg_template safexcel_alg_sha512 = { .cra_driver_name = "safexcel-sha512", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1601,6 +1608,7 @@ struct safexcel_alg_template safexcel_alg_sha384 = { .cra_driver_name = "safexcel-sha384", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1672,6 +1680,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha512 = { .cra_driver_name = "safexcel-hmac-sha512", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1743,6 +1752,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = { .cra_driver_name = "safexcel-hmac-sha384", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1799,6 +1809,7 @@ struct safexcel_alg_template safexcel_alg_md5 = { .cra_driver_name = "safexcel-md5", 
.cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1871,6 +1882,7 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = { .cra_driver_name = "safexcel-hmac-md5", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1952,6 +1964,7 @@ struct safexcel_alg_template safexcel_alg_crc32 = { .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2041,6 +2054,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = { .cra_driver_name = "safexcel-cbcmac-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2136,6 +2150,7 @@ struct safexcel_alg_template safexcel_alg_xcbcmac = { .cra_driver_name = "safexcel-xcbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2232,6 +2247,7 @@ struct safexcel_alg_template safexcel_alg_cmac = { .cra_driver_name = "safexcel-cmac-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2288,6 +2304,7 @@ struct safexcel_alg_template safexcel_alg_sm3 = { .cra_driver_name = "safexcel-sm3", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM3_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2359,6 +2376,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sm3 = { .cra_driver_name = "safexcel-hmac-sm3", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM3_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index ad73fc946682..f478bb0a566a 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -1402,7 +1402,8 @@ static int __init ixp_module_init(void) /* block ciphers */ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC; + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; if (!cra->setkey) cra->setkey = ablk_setkey; if (!cra->encrypt) @@ -1435,7 +1436,8 @@ static int __init ixp_module_init(void) /* authenc */ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC; + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; cra->setkey = cra->setkey ?: aead_setkey; cra->setauthsize = aead_setauthsize; cra->encrypt = aead_encrypt; diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c index f133c2ccb5ae..45b4d7a29833 100644 --- a/drivers/crypto/marvell/cesa/cipher.c +++ b/drivers/crypto/marvell/cesa/cipher.c @@ -508,7 +508,8 @@ struct skcipher_alg mv_cesa_ecb_des_alg = { .cra_name = 
"ecb(des)", .cra_driver_name = "mv-ecb-des", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), .cra_alignmask = 0, @@ -558,7 +559,8 @@ struct skcipher_alg mv_cesa_cbc_des_alg = { .cra_name = "cbc(des)", .cra_driver_name = "mv-cbc-des", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), .cra_alignmask = 0, @@ -616,7 +618,8 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "mv-ecb-des3-ede", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), .cra_alignmask = 0, @@ -669,7 +672,8 @@ struct skcipher_alg mv_cesa_cbc_des3_ede_alg = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "mv-cbc-des3-ede", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), .cra_alignmask = 0, @@ -741,7 +745,8 @@ struct skcipher_alg mv_cesa_ecb_aes_alg = { .cra_name = "ecb(aes)", .cra_driver_name = "mv-ecb-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), .cra_alignmask = 0, @@ -790,7 +795,8 @@ struct skcipher_alg mv_cesa_cbc_aes_alg = { .cra_name = "cbc(aes)", .cra_driver_name = "mv-cbc-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index b971284332b6..bd0bd9ffd6e9 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -921,6 +921,7 @@ struct ahash_alg mv_md5_alg = { .cra_driver_name = "mv-md5", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), @@ -991,6 +992,7 @@ struct ahash_alg mv_sha1_alg = { .cra_driver_name = "mv-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), @@ -1064,6 +1066,7 @@ struct ahash_alg mv_sha256_alg = { .cra_driver_name = "mv-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), @@ -1298,6 +1301,7 @@ struct ahash_alg mv_ahmac_md5_alg = { .cra_driver_name = "mv-hmac-md5", .cra_priority = 300, .cra_flags = 
CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), @@ -1368,6 +1372,7 @@ struct ahash_alg mv_ahmac_sha1_alg = { .cra_driver_name = "mv-hmac-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), @@ -1438,6 +1443,7 @@ struct ahash_alg mv_ahmac_sha256_alg = { .cra_driver_name = "mv-hmac-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 01bf4826c03e..90bb31329d4b 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -1301,7 +1301,7 @@ static int otx_cpt_aead_null_decrypt(struct aead_request *req) static struct skcipher_alg otx_cpt_skciphers[] = { { .base.cra_name = "xts(aes)", .base.cra_driver_name = "cpt_xts_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1318,7 +1318,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cpt_cbc_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1335,7 +1335,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "cpt_ecb_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1352,7 +1352,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "cfb(aes)", .base.cra_driver_name = "cpt_cfb_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1369,7 +1369,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cpt_cbc_des3_ede", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx), .base.cra_alignmask = 7, @@ -1386,7 +1386,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "cpt_ecb_des3_ede", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx), .base.cra_alignmask = 7, @@ -1407,7 +1407,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "cpt_hmac_sha1_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + 
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1426,7 +1426,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "cpt_hmac_sha256_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1445,7 +1445,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "cpt_hmac_sha384_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1464,7 +1464,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "cpt_hmac_sha512_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1483,7 +1483,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha1),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha1_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1502,7 +1502,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha256),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha256_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1521,7 +1521,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha384),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha384_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1540,7 +1540,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha512),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha512_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1559,7 +1559,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "cpt_rfc4106_gcm_aes", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 6a828bbecea4..d8aec5153b21 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1382,7 +1382,8 @@ static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl) snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); 
alg->base.cra_priority = N2_CRA_PRIORITY; - alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; alg->base.cra_blocksize = tmpl->block_size; p->enc_type = tmpl->enc_type; alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context); diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 13503e16ce1d..dac6eb37fff9 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -1232,6 +1232,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), @@ -1257,6 +1258,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), @@ -1280,7 +1282,8 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "cbc-des-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), .base.cra_module = THIS_MODULE, @@ -1304,7 +1307,8 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "ecb-des-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), .base.cra_module = THIS_MODULE, @@ -1327,6 +1331,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "cbc-des3-ede-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), @@ -1351,6 +1356,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "ecb-des3-ede-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), @@ -1382,6 +1388,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-aes-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1412,6 +1419,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-aes-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1442,6 +1450,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-aes-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | 
CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1472,6 +1481,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1503,6 +1513,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1533,6 +1544,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1562,6 +1574,7 @@ static struct spacc_alg l2_engine_algs[] = { .base.cra_driver_name = "f8-kasumi-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .base.cra_blocksize = 8, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 2a353e249e1c..72753b84dc95 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -1295,7 +1295,7 @@ static struct aead_alg qat_aeads[] = { { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha1", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), .cra_module = THIS_MODULE, @@ -1312,7 +1312,7 @@ static struct aead_alg qat_aeads[] = { { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha256", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), .cra_module = THIS_MODULE, @@ -1329,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha512", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), .cra_module = THIS_MODULE, @@ -1347,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "qat_aes_cbc", .base.cra_priority = 4001, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), .base.cra_alignmask = 0, @@ -1365,7 +1365,7 @@ static struct skcipher_alg qat_skciphers[] = { { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "qat_aes_ctr", .base.cra_priority = 4001, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), .base.cra_alignmask = 0, @@ -1383,7 +1383,8 @@ static struct skcipher_alg qat_skciphers[] = { { .base.cra_name = "xts(aes)", .base.cra_driver_name = 
"qat_aes_xts", .base.cra_priority = 4001, - .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), .base.cra_alignmask = 0, diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index a8147381b774..5630c5addd28 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -406,6 +406,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, alg->base.cra_priority = 300; alg->base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx); alg->base.cra_alignmask = 0; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 9c6db7f698c4..7c547352a862 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2264,7 +2264,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha1-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -2285,7 +2286,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha1-" "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -2306,7 +2308,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha1-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -2330,7 +2333,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha1-" "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -2352,7 +2356,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha224-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -2373,7 +2378,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha224-" "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -2394,7 +2400,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha224-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -2418,7 +2425,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha224-" "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + 
.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -2440,7 +2448,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha256-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -2461,7 +2470,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha256-" "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -2482,7 +2492,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha256-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -2506,7 +2517,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha256-" "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -2528,7 +2540,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha384-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -2549,7 +2562,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha384-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -2571,7 +2585,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha512-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -2592,7 +2607,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha512-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -2614,7 +2630,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-md5-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -2635,7 +2652,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-md5-" "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -2655,7 +2673,8 @@ static struct talitos_alg_template 
driver_algs[] = { .cra_driver_name = "authenc-hmac-md5-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -2678,7 +2697,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-md5-" "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -2699,7 +2719,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-talitos", .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = skcipher_aes_setkey, @@ -2712,7 +2733,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-talitos", .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, @@ -2727,7 +2749,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "ctr-aes-talitos", .base.cra_blocksize = 1, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, @@ -2742,7 +2765,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "ecb(des)", .base.cra_driver_name = "ecb-des-talitos", .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = skcipher_des_setkey, @@ -2755,7 +2779,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "cbc(des)", .base.cra_driver_name = "cbc-des-talitos", .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, @@ -2770,7 +2795,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-3des-talitos", .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = skcipher_des3_setkey, @@ -2784,7 +2810,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-3des-talitos", .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -2804,7 +2831,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "md5", .cra_driver_name = "md5-talitos", .cra_blocksize = MD5_HMAC_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = 
CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2819,7 +2847,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "sha1", .cra_driver_name = "sha1-talitos", .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2834,7 +2863,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "sha224", .cra_driver_name = "sha224-talitos", .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2849,7 +2879,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "sha256", .cra_driver_name = "sha256-talitos", .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2864,7 +2895,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "sha384", .cra_driver_name = "sha384-talitos", .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2879,7 +2911,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "sha512", .cra_driver_name = "sha512-talitos", .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2894,7 +2927,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(md5)", .cra_driver_name = "hmac-md5-talitos", .cra_blocksize = MD5_HMAC_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2909,7 +2943,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(sha1)", .cra_driver_name = "hmac-sha1-talitos", .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2924,7 +2959,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(sha224)", .cra_driver_name = "hmac-sha224-talitos", .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2939,7 +2975,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(sha256)", .cra_driver_name = "hmac-sha256-talitos", .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2954,7 +2991,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(sha384)", .cra_driver_name = "hmac-sha384-talitos", .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2969,7 +3007,8 @@ static struct talitos_alg_template 
driver_algs[] = { .cra_name = "hmac(sha512)", .cra_driver_name = "hmac-sha512-talitos", .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index cb8a6ea2a4bc..b2601958282e 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -597,7 +597,8 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "virtio_crypto_aes_cbc", .base.cra_priority = 150, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx), .base.cra_module = THIS_MODULE, diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c index cd11558893cd..27079354dbe9 100644 --- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c +++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c @@ -364,6 +364,7 @@ static struct zynqmp_aead_drv_ctx aes_drv_ctx = { .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = ZYNQMP_AES_BLK_SIZE, From d7ba2c09e1aa03e01e1e3be55c8a5f4610500164 Mon Sep 17 00:00:00 2001 From: Meng Yu Date: Fri, 10 Jul 2020 15:40:41 +0800 Subject: [PATCH 106/157] crypto: hisilicon/hpre - Init the value of current_q of debugfs Initialize the current queue number to HPRE_PF_DEF_Q_NUM; otherwise it stays zero and its value cannot be set through "current_q_write". Signed-off-by: Meng Yu Reviewed-by: Zaibo Xu Reviewed-by: Hui Tang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index a3ee127a70e3..31313479a8ec 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -732,6 +732,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF) { qm->qp_base = HPRE_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; + qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; } From 6bc937b0b97539ab702a8b0928c0b0ee7a2f264a Mon Sep 17 00:00:00 2001 From: Hui Tang Date: Fri, 10 Jul 2020 15:40:42 +0800 Subject: [PATCH 107/157] crypto: hisilicon/hpre - HPRE_OVERTIME_THRHLD can be written by debugfs Registers in "hpre_dfx_files" can only be cleared to zero, except for HPRE_OVERTIME_THRHLD, which can be written with any value.
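The intended write rule can be sketched in isolation as below. This is a minimal sketch, not the driver's code: the real change in the hunk that follows keys off dfx_item->type rather than a bool, and it additionally clears the related HPRE_OVER_THRHLD_CNT counter whenever the threshold is rewritten.

#include <linux/atomic.h>
#include <linux/errno.h>

/* Sketch, assuming one atomic64_t per dfx file: a write of 0 clears
 * any counter, but only the overtime-threshold file accepts other
 * values. */
static int hpre_dfx_set_sketch(atomic64_t *counter,
			       bool is_overtime_thrhld, u64 val)
{
	if (val && !is_overtime_thrhld)
		return -EINVAL;	/* plain counters are clear-only */

	atomic64_set(counter, val);
	return 0;
}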
Fixes: 64a6301ebee7("crypto: hisilicon/hpre - add debugfs for ...") Signed-off-by: Hui Tang Signed-off-by: Meng Yu Reviewed-by: Zaibo Xu Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 31313479a8ec..23f2e5c2eb2d 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -548,13 +548,15 @@ static int hpre_debugfs_atomic64_get(void *data, u64 *val) static int hpre_debugfs_atomic64_set(void *data, u64 val) { struct hpre_dfx *dfx_item = data; - struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD; + struct hpre_dfx *hpre_dfx = NULL; - if (val) - return -EINVAL; - - if (dfx_item->type == HPRE_OVERTIME_THRHLD) + if (dfx_item->type == HPRE_OVERTIME_THRHLD) { + hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD; atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0); + } else if (val) { + return -EINVAL; + } + atomic64_set(&dfx_item->value, val); return 0; From a14f6609b0c4e09d2f440d0b389224fbf851630e Mon Sep 17 00:00:00 2001 From: Meng Yu Date: Fri, 10 Jul 2020 15:40:43 +0800 Subject: [PATCH 108/157] crypto: hisilicon/hpre - Modify the Macro definition and format 1. Bit 1 to bit 5 are NFE, not CE. 2. Macro 'HPRE_VF_NUM' is defined in 'qm.h', so delete it here. 3. Delete multiple blank lines. 4. Adjust format alignment. Signed-off-by: Meng Yu Reviewed-by: Zaibo Xu Reviewed-by: Longfang Liu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 23f2e5c2eb2d..da17729ef8ba 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -12,7 +12,6 @@ #include #include "hpre.h" -#define HPRE_VF_NUM 63 #define HPRE_QUEUE_NUM_V2 1024 #define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) @@ -46,9 +45,9 @@ #define HPRE_CORE_IS_SCHD_OFFSET 0x90 #define HPRE_RAS_CE_ENB 0x301410 -#define HPRE_HAC_RAS_CE_ENABLE 0x3f +#define HPRE_HAC_RAS_CE_ENABLE 0x1 #define HPRE_RAS_NFE_ENB 0x301414 -#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0 +#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe #define HPRE_RAS_FE_ENB 0x301418 #define HPRE_HAC_RAS_FE_ENABLE 0 @@ -264,7 +263,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG)); writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG)); ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val, - val & BIT(0), + val & BIT(0), HPRE_REG_RD_INTVRL_US, HPRE_REG_RD_TMOUT_US); if (ret) { @@ -372,7 +371,6 @@ static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val) u32 num_vfs = qm->vfs_num; u32 vfq_num, tmp; - if (val > num_vfs) return -EINVAL; @@ -449,7 +447,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) } static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, - size_t count, loff_t *pos) + size_t count, loff_t *pos) { struct hpre_debugfs_file *file = filp->private_data; char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; @@ -477,7 +475,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, } static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf, - size_t count, loff_t *pos) + size_t count, loff_t *pos) { struct 
hpre_debugfs_file *file = filp->private_data; char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; From a0c34e8dd8646957a68aa6e3946b9c87554bd6e1 Mon Sep 17 00:00:00 2001 From: Meng Yu Date: Fri, 10 Jul 2020 15:40:44 +0800 Subject: [PATCH 109/157] crypto: hisilicon/hpre - Add a switch in sriov_configure If CONFIG_PCI_IOV is not enabled, we can not use "sriov_configure". Fixes: 5ec302a364bf("crypto: hisilicon - add SRIOV support for HPRE") Signed-off-by: Meng Yu Reviewed-by: Zaibo Xu Reviewed-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index da17729ef8ba..37c2bc5d7c96 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -925,7 +925,8 @@ static struct pci_driver hpre_pci_driver = { .id_table = hpre_dev_ids, .probe = hpre_probe, .remove = hpre_remove, - .sriov_configure = hisi_qm_sriov_configure, + .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ? + hisi_qm_sriov_configure : NULL, .err_handler = &hpre_err_handler, }; From dadbe4c11753581d959b866198a48f5727fdc7c3 Mon Sep 17 00:00:00 2001 From: Meng Yu Date: Fri, 10 Jul 2020 15:40:45 +0800 Subject: [PATCH 110/157] crypto: hisilicon/hpre - update debugfs interface parameters Update debugfs interface parameters, and adjust the processing logic inside the corresponding function. Fixes: 848974151618("crypto: hisilicon - Add debugfs for HPRE") Signed-off-by: Meng Yu Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 59 ++++++++++------------- 1 file changed, 26 insertions(+), 33 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 37c2bc5d7c96..b69cea318cdf 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -563,15 +563,17 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val) DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get, hpre_debugfs_atomic64_set, "%llu\n"); -static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir, +static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, enum hpre_ctrl_dbgfs_file type, int indx) { + struct hpre *hpre = container_of(qm, struct hpre, qm); + struct hpre_debug *dbg = &hpre->debug; struct dentry *file_dir; if (dir) file_dir = dir; else - file_dir = dbg->debug_root; + file_dir = qm->debug.debug_root; if (type >= HPRE_DEBUG_FILE_NUM) return -EINVAL; @@ -586,10 +588,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir, return 0; } -static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug) +static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm) { - struct hpre *hpre = container_of(debug, struct hpre, debug); - struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; struct debugfs_regset32 *regset; @@ -601,14 +601,12 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug) regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs); regset->base = qm->io_base; - debugfs_create_regset32("regs", 0444, debug->debug_root, regset); + debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset); return 0; } -static int hpre_cluster_debugfs_init(struct hpre_debug *debug) +static int hpre_cluster_debugfs_init(struct hisi_qm *qm) { - struct hpre *hpre = container_of(debug, struct hpre, debug); - struct 
hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; char buf[HPRE_DBGFS_VAL_MAX_LEN]; struct debugfs_regset32 *regset; @@ -619,7 +617,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i); if (ret < 0) return -EINVAL; - tmp_d = debugfs_create_dir(buf, debug->debug_root); + tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) @@ -630,7 +628,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) regset->base = qm->io_base + hpre_cluster_offsets[i]; debugfs_create_regset32("regs", 0444, tmp_d, regset); - ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL, + ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL, i + HPRE_CLUSTER_CTRL); if (ret) return ret; @@ -639,32 +637,31 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) return 0; } -static int hpre_ctrl_debug_init(struct hpre_debug *debug) +static int hpre_ctrl_debug_init(struct hisi_qm *qm) { int ret; - ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM, + ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM, HPRE_CURRENT_QM); if (ret) return ret; - ret = hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE, + ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE, HPRE_CLEAR_ENABLE); if (ret) return ret; - ret = hpre_pf_comm_regs_debugfs_init(debug); + ret = hpre_pf_comm_regs_debugfs_init(qm); if (ret) return ret; - return hpre_cluster_debugfs_init(debug); + return hpre_cluster_debugfs_init(qm); } -static void hpre_dfx_debug_init(struct hpre_debug *debug) +static void hpre_dfx_debug_init(struct hisi_qm *qm) { - struct hpre *hpre = container_of(debug, struct hpre, debug); + struct hpre *hpre = container_of(qm, struct hpre, qm); struct hpre_dfx *dfx = hpre->debug.dfx; - struct hisi_qm *qm = &hpre->qm; struct dentry *parent; int i; @@ -676,30 +673,27 @@ static void hpre_dfx_debug_init(struct hpre_debug *debug) } } -static int hpre_debugfs_init(struct hpre *hpre) +static int hpre_debugfs_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; - struct dentry *dir; int ret; - dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root); - qm->debug.debug_root = dir; + qm->debug.debug_root = debugfs_create_dir(dev_name(dev), + hpre_debugfs_root); + qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; - ret = hisi_qm_debug_init(qm); if (ret) goto failed_to_create; if (qm->pdev->device == HPRE_PCI_DEVICE_ID) { - hpre->debug.debug_root = dir; - ret = hpre_ctrl_debug_init(&hpre->debug); + ret = hpre_ctrl_debug_init(qm); if (ret) goto failed_to_create; } - hpre_dfx_debug_init(&hpre->debug); + hpre_dfx_debug_init(qm); return 0; @@ -708,10 +702,8 @@ failed_to_create: return ret; } -static void hpre_debugfs_exit(struct hpre *hpre) +static void hpre_debugfs_exit(struct hisi_qm *qm) { - struct hisi_qm *qm = &hpre->qm; - debugfs_remove_recursive(qm->debug.debug_root); } @@ -850,7 +842,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_with_err_init; - ret = hpre_debugfs_init(hpre); + ret = hpre_debugfs_init(qm); if (ret) dev_warn(&pdev->dev, "init debugfs fail!\n"); @@ -875,6 +867,7 @@ err_with_crypto_register: err_with_qm_start: hisi_qm_del_from_list(qm, &hpre_devices); + hpre_debugfs_exit(qm); hisi_qm_stop(qm); err_with_err_init: @@ -906,7 +899,7 @@ static void hpre_remove(struct pci_dev *pdev) 
qm->debug.curr_qm_qp_num = 0; } - hpre_debugfs_exit(hpre); + hpre_debugfs_exit(qm); hisi_qm_stop(qm); hisi_qm_dev_err_uninit(qm); hisi_qm_uninit(qm); From 10f33d391ebd4edc861685ed0566cf807a363661 Mon Sep 17 00:00:00 2001 From: Hui Tang Date: Fri, 10 Jul 2020 15:40:46 +0800 Subject: [PATCH 111/157] crypto: hisilicon/hpre - disable FLR triggered by hardware For Hi1620 hardware, we should disable these hardware-triggered FLRs: 1. BME_FLR - bit 7, 2. PM_FLR - bit 11, 3. SRIOV_FLR - bit 12. Otherwise HPRE may go into the D3 state when we bind and unbind HPRE quickly, as the hardware performs FLR triggered by BME/PM/SRIOV. Fixes: c8b4b477079d ("crypto: hisilicon - add HiSilicon HPRE accelerator") Signed-off-by: Hui Tang Signed-off-by: Meng Yu Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index b69cea318cdf..b135c74fb619 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -82,6 +82,10 @@ #define HPRE_CORE_ECC_2BIT_ERR BIT(1) #define HPRE_OOO_ECC_2BIT_ERR BIT(5) +#define HPRE_QM_BME_FLR BIT(7) +#define HPRE_QM_PM_FLR BIT(11) +#define HPRE_QM_SRIOV_FLR BIT(12) + #define HPRE_VIA_MSI_DSM 1 #define HPRE_SQE_MASK_OFFSET 8 #define HPRE_SQE_MASK_LEN 24 @@ -230,6 +234,22 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm) return 0; } +/* + * For Hi1620, we should disable FLR triggered by hardware (BME/PM/SRIOV). + * Otherwise it may stay in D3 state when we bind and unbind hpre quickly, + * as it does FLR triggered by hardware. + */ +static void disable_flr_of_bme(struct hisi_qm *qm) +{ + u32 val; + + val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); + val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR); + val |= HPRE_QM_PM_FLR; + writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); + writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); +} + static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; @@ -241,10 +261,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE)); writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG)); - /* disable FLR triggered by BME(bus master enable) */ - writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); - writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); - /* HPRE need more time, we close this interrupt */ val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK)); val |= BIT(HPRE_TIMEOUT_ABNML_BIT); @@ -295,6 +311,8 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) if (ret) dev_err(dev, "acpi_evaluate_dsm err.\n"); + disable_flr_of_bme(qm); + return ret; } From a874f59104304278f922215caaa9dbfb03b2a1fa Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 10 Jul 2020 20:34:28 -0700 Subject: [PATCH 112/157] crypto: xts - prefix function and struct names with "xts" Overly-generic names can cause problems like naming collisions, confusing crash reports, and reduced grep-ability. E.g. see commit d099ea6e6fde ("crypto - Avoid free() namespace collision"). Clean this up for the xts template by prefixing the names with "xts_". (I didn't use "crypto_xts_" instead because that seems overkill.)
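Both this patch and the lrw patch that follows remove the same pattern: before the renames, crypto/xts.c and crypto/lrw.c each defined a file-local "struct rctx" (le128 t in xts, be128 t in lrw) plus setkey/encrypt/decrypt/init_tfm helpers, so a backtrace or grep hit on one of those names could point at either file. A condensed illustration of the renamed forms, which can coexist in one namespace without ambiguity (field lists trimmed; le128/be128 come from crypto/b128ops.h):

#include <crypto/b128ops.h>

struct xts_request_ctx { le128 t; /* ... */ };	/* was "struct rctx" in xts.c */
struct lrw_request_ctx { be128 t; /* ... */ };	/* was "struct rctx" in lrw.c */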
Also constify the tfm context in a couple places, and make xts_free_instance() use the instance context structure so that it doesn't just assume the crypto_skcipher_spawn is at the beginning. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/xts.c | 137 +++++++++++++++++++++++++++------------------------ 1 file changed, 72 insertions(+), 65 deletions(-) diff --git a/crypto/xts.c b/crypto/xts.c index 9a7adab6c3e1..3c3ed02c7663 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -20,7 +20,7 @@ #include #include -struct priv { +struct xts_tfm_ctx { struct crypto_skcipher *child; struct crypto_cipher *tweak; }; @@ -30,17 +30,17 @@ struct xts_instance_ctx { char name[CRYPTO_MAX_ALG_NAME]; }; -struct rctx { +struct xts_request_ctx { le128 t; struct scatterlist *tail; struct scatterlist sg[2]; struct skcipher_request subreq; }; -static int setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int xts_setkey(struct crypto_skcipher *parent, const u8 *key, + unsigned int keylen) { - struct priv *ctx = crypto_skcipher_ctx(parent); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child; struct crypto_cipher *tweak; int err; @@ -78,9 +78,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, * mutliple calls to the 'ecb(..)' instance, which usually would be slower than * just doing the gf128mul_x_ble() calls again. */ -static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc) +static int xts_xor_tweak(struct skcipher_request *req, bool second_pass, + bool enc) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const bool cts = (req->cryptlen % XTS_BLOCK_SIZE); const int bs = XTS_BLOCK_SIZE; @@ -128,23 +129,23 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc) return err; } -static int xor_tweak_pre(struct skcipher_request *req, bool enc) +static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc) { - return xor_tweak(req, false, enc); + return xts_xor_tweak(req, false, enc); } -static int xor_tweak_post(struct skcipher_request *req, bool enc) +static int xts_xor_tweak_post(struct skcipher_request *req, bool enc) { - return xor_tweak(req, true, enc); + return xts_xor_tweak(req, true, enc); } -static void cts_done(struct crypto_async_request *areq, int err) +static void xts_cts_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; le128 b; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); le128_xor(&b, &rctx->t, &b); @@ -154,12 +155,13 @@ static void cts_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static int cts_final(struct skcipher_request *req, - int (*crypt)(struct skcipher_request *req)) +static int xts_cts_final(struct skcipher_request *req, + int (*crypt)(struct skcipher_request *req)) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + const struct xts_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1); - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int tail = req->cryptlen % XTS_BLOCK_SIZE; le128 b[2]; @@ -177,7 
+179,8 @@ static int cts_final(struct skcipher_request *req, scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1); skcipher_request_set_tfm(subreq, ctx->child); - skcipher_request_set_callback(subreq, req->base.flags, cts_done, req); + skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done, + req); skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail, XTS_BLOCK_SIZE, NULL); @@ -192,18 +195,18 @@ static int cts_final(struct skcipher_request *req, return 0; } -static void encrypt_done(struct crypto_async_request *areq, int err) +static void xts_encrypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req, true); + err = xts_xor_tweak_post(req, true); if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { - err = cts_final(req, crypto_skcipher_encrypt); + err = xts_cts_final(req, crypto_skcipher_encrypt); if (err == -EINPROGRESS) return; } @@ -212,18 +215,18 @@ static void encrypt_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static void decrypt_done(struct crypto_async_request *areq, int err) +static void xts_decrypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req, false); + err = xts_xor_tweak_post(req, false); if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { - err = cts_final(req, crypto_skcipher_decrypt); + err = xts_cts_final(req, crypto_skcipher_decrypt); if (err == -EINPROGRESS) return; } @@ -232,10 +235,12 @@ static void decrypt_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static int init_crypt(struct skcipher_request *req, crypto_completion_t compl) +static int xts_init_crypt(struct skcipher_request *req, + crypto_completion_t compl) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - struct rctx *rctx = skcipher_request_ctx(req); + const struct xts_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; if (req->cryptlen < XTS_BLOCK_SIZE) @@ -252,45 +257,45 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t compl) return 0; } -static int encrypt(struct skcipher_request *req) +static int xts_encrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int err; - err = init_crypt(req, encrypt_done) ?: - xor_tweak_pre(req, true) ?: + err = xts_init_crypt(req, xts_encrypt_done) ?: + xts_xor_tweak_pre(req, true) ?: crypto_skcipher_encrypt(subreq) ?: - xor_tweak_post(req, true); + xts_xor_tweak_post(req, true); if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) return err; - return cts_final(req, crypto_skcipher_encrypt); + return xts_cts_final(req, crypto_skcipher_encrypt); } -static int decrypt(struct skcipher_request *req) +static int xts_decrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = 
skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int err; - err = init_crypt(req, decrypt_done) ?: - xor_tweak_pre(req, false) ?: + err = xts_init_crypt(req, xts_decrypt_done) ?: + xts_xor_tweak_pre(req, false) ?: crypto_skcipher_decrypt(subreq) ?: - xor_tweak_post(req, false); + xts_xor_tweak_post(req, false); if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) return err; - return cts_final(req, crypto_skcipher_decrypt); + return xts_cts_final(req, crypto_skcipher_decrypt); } -static int init_tfm(struct crypto_skcipher *tfm) +static int xts_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *child; struct crypto_cipher *tweak; @@ -309,26 +314,28 @@ static int init_tfm(struct crypto_skcipher *tfm) ctx->tweak = tweak; crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) + - sizeof(struct rctx)); + sizeof(struct xts_request_ctx)); return 0; } -static void exit_tfm(struct crypto_skcipher *tfm) +static void xts_exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_skcipher(ctx->child); crypto_free_cipher(ctx->tweak); } -static void crypto_xts_free(struct skcipher_instance *inst) +static void xts_free_instance(struct skcipher_instance *inst) { - crypto_drop_skcipher(skcipher_instance_ctx(inst)); + struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); + + crypto_drop_skcipher(&ictx->spawn); kfree(inst); } -static int create(struct crypto_template *tmpl, struct rtattr **tb) +static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; struct xts_instance_ctx *ctx; @@ -416,43 +423,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2; inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2; - inst->alg.base.cra_ctxsize = sizeof(struct priv); + inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx); - inst->alg.init = init_tfm; - inst->alg.exit = exit_tfm; + inst->alg.init = xts_init_tfm; + inst->alg.exit = xts_exit_tfm; - inst->alg.setkey = setkey; - inst->alg.encrypt = encrypt; - inst->alg.decrypt = decrypt; + inst->alg.setkey = xts_setkey; + inst->alg.encrypt = xts_encrypt; + inst->alg.decrypt = xts_decrypt; - inst->free = crypto_xts_free; + inst->free = xts_free_instance; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: - crypto_xts_free(inst); + xts_free_instance(inst); } return err; } -static struct crypto_template crypto_tmpl = { +static struct crypto_template xts_tmpl = { .name = "xts", - .create = create, + .create = xts_create, .module = THIS_MODULE, }; -static int __init crypto_module_init(void) +static int __init xts_module_init(void) { - return crypto_register_template(&crypto_tmpl); + return crypto_register_template(&xts_tmpl); } -static void __exit crypto_module_exit(void) +static void __exit xts_module_exit(void) { - crypto_unregister_template(&crypto_tmpl); + crypto_unregister_template(&xts_tmpl); } -subsys_initcall(crypto_module_init); -module_exit(crypto_module_exit); +subsys_initcall(xts_module_init); +module_exit(xts_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XTS block cipher mode"); From 
e456ef6ace3451fa46bd67983345ea758f824865 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 10 Jul 2020 20:36:49 -0700 Subject: [PATCH 113/157] crypto: lrw - prefix function and struct names with "lrw" Overly-generic names can cause problems like naming collisions, confusing crash reports, and reduced grep-ability. E.g. see commit d099ea6e6fde ("crypto - Avoid free() namespace collision"). Clean this up for the lrw template by prefixing the names with "lrw_". (I didn't use "crypto_lrw_" instead because that seems overkill.) Also constify the tfm context in a couple places. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/lrw.c | 119 ++++++++++++++++++++++++++------------------------- 1 file changed, 61 insertions(+), 58 deletions(-) diff --git a/crypto/lrw.c b/crypto/lrw.c index a709c801ee45..3f90a5ec28f7 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -27,7 +27,7 @@ #define LRW_BLOCK_SIZE 16 -struct priv { +struct lrw_tfm_ctx { struct crypto_skcipher *child; /* @@ -49,12 +49,12 @@ struct priv { be128 mulinc[128]; }; -struct rctx { +struct lrw_request_ctx { be128 t; struct skcipher_request subreq; }; -static inline void setbit128_bbe(void *b, int bit) +static inline void lrw_setbit128_bbe(void *b, int bit) { __set_bit(bit ^ (0x80 - #ifdef __BIG_ENDIAN @@ -65,10 +65,10 @@ static inline void setbit128_bbe(void *b, int bit) ), b); } -static int setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key, + unsigned int keylen) { - struct priv *ctx = crypto_skcipher_ctx(parent); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child = ctx->child; int err, bsize = LRW_BLOCK_SIZE; const u8 *tweak = key + keylen - bsize; @@ -92,7 +92,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, /* initialize optimization table */ for (i = 0; i < 128; i++) { - setbit128_bbe(&tmp, i); + lrw_setbit128_bbe(&tmp, i); ctx->mulinc[i] = tmp; gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); } @@ -108,10 +108,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, * For example: * * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 }; - * int i = next_index(&counter); + * int i = lrw_next_index(&counter); * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 } */ -static int next_index(u32 *counter) +static int lrw_next_index(u32 *counter) { int i, res = 0; @@ -135,14 +135,14 @@ static int next_index(u32 *counter) * We compute the tweak masks twice (both before and after the ECB encryption or * decryption) to avoid having to allocate a temporary buffer and/or make * mutliple calls to the 'ecb(..)' instance, which usually would be slower than - * just doing the next_index() calls again. + * just doing the lrw_next_index() calls again. 
*/ -static int xor_tweak(struct skcipher_request *req, bool second_pass) +static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) { const int bs = LRW_BLOCK_SIZE; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct priv *ctx = crypto_skcipher_ctx(tfm); - struct rctx *rctx = skcipher_request_ctx(req); + const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); be128 t = rctx->t; struct skcipher_walk w; __be32 *iv; @@ -178,7 +178,8 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) /* T <- I*Key2, using the optimization * discussed in the specification */ - be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]); + be128_xor(&t, &t, + &ctx->mulinc[lrw_next_index(counter)]); } while ((avail -= bs) >= bs); if (second_pass && w.nbytes == w.total) { @@ -194,38 +195,40 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) return err; } -static int xor_tweak_pre(struct skcipher_request *req) +static int lrw_xor_tweak_pre(struct skcipher_request *req) { - return xor_tweak(req, false); + return lrw_xor_tweak(req, false); } -static int xor_tweak_post(struct skcipher_request *req) +static int lrw_xor_tweak_post(struct skcipher_request *req) { - return xor_tweak(req, true); + return lrw_xor_tweak(req, true); } -static void crypt_done(struct crypto_async_request *areq, int err) +static void lrw_crypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req); + err = lrw_xor_tweak_post(req); } skcipher_request_complete(req, err); } -static void init_crypt(struct skcipher_request *req) +static void lrw_init_crypt(struct skcipher_request *req) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - struct rctx *rctx = skcipher_request_ctx(req); + const struct lrw_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; skcipher_request_set_tfm(subreq, ctx->child); - skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req); + skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done, + req); /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */ skcipher_request_set_crypt(subreq, req->dst, req->dst, req->cryptlen, req->iv); @@ -237,33 +240,33 @@ static void init_crypt(struct skcipher_request *req) gf128mul_64k_bbe(&rctx->t, ctx->table); } -static int encrypt(struct skcipher_request *req) +static int lrw_encrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; - init_crypt(req); - return xor_tweak_pre(req) ?: + lrw_init_crypt(req); + return lrw_xor_tweak_pre(req) ?: crypto_skcipher_encrypt(subreq) ?: - xor_tweak_post(req); + lrw_xor_tweak_post(req); } -static int decrypt(struct skcipher_request *req) +static int lrw_decrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; - init_crypt(req); - return xor_tweak_pre(req) ?: + lrw_init_crypt(req); + return lrw_xor_tweak_pre(req) ?: 
crypto_skcipher_decrypt(subreq) ?: - xor_tweak_post(req); + lrw_xor_tweak_post(req); } -static int init_tfm(struct crypto_skcipher *tfm) +static int lrw_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *cipher; cipher = crypto_spawn_skcipher(spawn); @@ -273,27 +276,27 @@ static int init_tfm(struct crypto_skcipher *tfm) ctx->child = cipher; crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + - sizeof(struct rctx)); + sizeof(struct lrw_request_ctx)); return 0; } -static void exit_tfm(struct crypto_skcipher *tfm) +static void lrw_exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); if (ctx->table) gf128mul_free_64k(ctx->table); crypto_free_skcipher(ctx->child); } -static void crypto_lrw_free(struct skcipher_instance *inst) +static void lrw_free_instance(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); } -static int create(struct crypto_template *tmpl, struct rtattr **tb) +static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; @@ -384,43 +387,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + LRW_BLOCK_SIZE; - inst->alg.base.cra_ctxsize = sizeof(struct priv); + inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx); - inst->alg.init = init_tfm; - inst->alg.exit = exit_tfm; + inst->alg.init = lrw_init_tfm; + inst->alg.exit = lrw_exit_tfm; - inst->alg.setkey = setkey; - inst->alg.encrypt = encrypt; - inst->alg.decrypt = decrypt; + inst->alg.setkey = lrw_setkey; + inst->alg.encrypt = lrw_encrypt; + inst->alg.decrypt = lrw_decrypt; - inst->free = crypto_lrw_free; + inst->free = lrw_free_instance; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: - crypto_lrw_free(inst); + lrw_free_instance(inst); } return err; } -static struct crypto_template crypto_tmpl = { +static struct crypto_template lrw_tmpl = { .name = "lrw", - .create = create, + .create = lrw_create, .module = THIS_MODULE, }; -static int __init crypto_module_init(void) +static int __init lrw_module_init(void) { - return crypto_register_template(&crypto_tmpl); + return crypto_register_template(&lrw_tmpl); } -static void __exit crypto_module_exit(void) +static void __exit lrw_module_exit(void) { - crypto_unregister_template(&crypto_tmpl); + crypto_unregister_template(&lrw_tmpl); } -subsys_initcall(crypto_module_init); -module_exit(crypto_module_exit); +subsys_initcall(lrw_module_init); +module_exit(lrw_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LRW block cipher mode"); From d110cf0ac1bf24ac098b8ea5b0188c717acf7a02 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 12 Jul 2020 23:14:04 +0200 Subject: [PATCH 114/157] crypto: chelsio - Avoid some code duplication The error handling path of 'chcr_authenc_setkey()' is the same as this error handling code. So just 'goto out' as done everywhere in the function to simplify the code. 
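The shape of the cleanup is the classic single-exit pattern; a self-contained sketch in plain C (not the chelsio code, and the names here are illustrative):

#include <string.h>

/* Every failure path funnels through one label, so the common cleanup
 * (zeroing key material, resetting state) is written exactly once. */
static int setkey_sketch(unsigned char *dst, size_t dstlen,
			 const unsigned char *key, size_t keylen)
{
	unsigned char tmp[64];
	int err = -1;

	if (keylen > sizeof(tmp) || keylen > dstlen)
		goto out;	/* previously: duplicated zeroing + return */

	memcpy(tmp, key, keylen);
	memcpy(dst, tmp, keylen);
	err = 0;
out:
	memset(tmp, 0, sizeof(tmp));	/* common cleanup, done once */
	return err;
}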
Signed-off-by: Christophe JAILLET Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_algo.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 9b68d62149ad..f37744f76b0f 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -3598,9 +3598,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, base_hash = chcr_alloc_shash(max_authsize); if (IS_ERR(base_hash)) { pr_err("chcr : Base driver cannot be loaded\n"); - aeadctx->enckey_len = 0; - memzero_explicit(&keys, sizeof(keys)); - return -EINVAL; + goto out; } { SHASH_DESC_ON_STACK(shash, base_hash); From 1b3eeb8761f053422a4b3bcb024fc3bf41e12304 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 12 Jul 2020 23:14:24 +0200 Subject: [PATCH 115/157] crypto: chelsio - Fix some pr_xxx messages At the top this file, we have: #define pr_fmt(fmt) "chcr:" fmt So there is no need to repeat "chcr : " in some error message when the pr_xxx macro is used. This would lead to log "chcr:chcr : blabla" Signed-off-by: Christophe JAILLET Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_algo.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index f37744f76b0f..13b908ea4873 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -1215,7 +1215,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req, wrparam.bytes = bytes; skb = create_cipher_wr(&wrparam); if (IS_ERR(skb)) { - pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); + pr_err("%s : Failed to form WR. No memory\n", __func__); err = PTR_ERR(skb); goto unmap; } @@ -1545,7 +1545,7 @@ static int get_alg_config(struct algo_param *params, params->result_size = SHA512_DIGEST_SIZE; break; default: - pr_err("chcr : ERROR, unsupported digest size\n"); + pr_err("ERROR, unsupported digest size\n"); return -EINVAL; } return 0; @@ -3560,7 +3560,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, goto out; if (get_alg_config(¶m, max_authsize)) { - pr_err("chcr : Unsupported digest size\n"); + pr_err("Unsupported digest size\n"); goto out; } subtype = get_aead_subtype(authenc); @@ -3579,7 +3579,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, } else if (keys.enckeylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { - pr_err("chcr : Unsupported cipher key\n"); + pr_err("Unsupported cipher key\n"); goto out; } @@ -3597,7 +3597,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, } base_hash = chcr_alloc_shash(max_authsize); if (IS_ERR(base_hash)) { - pr_err("chcr : Base driver cannot be loaded\n"); + pr_err("Base driver cannot be loaded\n"); goto out; } { @@ -3613,7 +3613,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, keys.authkeylen, o_ptr); if (err) { - pr_err("chcr : Base driver cannot be loaded\n"); + pr_err("Base driver cannot be loaded\n"); goto out; } keys.authkeylen = max_authsize; @@ -3698,7 +3698,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, } else if (keys.enckeylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { - pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen); + pr_err("Unsupported cipher key %d\n", keys.enckeylen); goto out; } memcpy(aeadctx->key, 
keys.enckey, keys.enckeylen); @@ -3734,7 +3734,7 @@ static int chcr_aead_op(struct aead_request *req, cdev = a_ctx(tfm)->dev; if (!cdev) { - pr_err("chcr : %s : No crypto device.\n", __func__); + pr_err("%s : No crypto device.\n", __func__); return -ENXIO; } @@ -4487,8 +4487,7 @@ static int chcr_register_alg(void) break; } if (err) { - pr_err("chcr : %s : Algorithm registration failed\n", - name); + pr_err("%s : Algorithm registration failed\n", name); goto register_err; } else { driver_algs[i].is_registered = 1; From 44623b2818f4a442726639572f44fd9b6d0ef68c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 27 May 2020 16:17:40 +0200 Subject: [PATCH 116/157] crypto: x86/crc32c - fix building with clang ias The clang integrated assembler complains about movzxw: arch/x86/crypto/crc32c-pcl-intel-asm_64.S:173:2: error: invalid instruction mnemonic 'movzxw' It seems that movzwq is the mnemonic that it expects instead, and this is what objdump prints when disassembling the file. Fixes: 6a8ce1ef3940 ("crypto: crc32c - Optimize CRC32C calculation with PCLMULQDQ instruction") Signed-off-by: Arnd Bergmann Reviewed-by: Nathan Chancellor Signed-off-by: Herbert Xu --- arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index d84c8bf64f02..884dc767b051 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -169,7 +169,7 @@ continue_block: ## branch into array lea jump_table(%rip), %bufp - movzxw (%bufp, %rax, 2), len + movzwq (%bufp, %rax, 2), len lea crc_array(%rip), %bufp lea (%bufp, len, 1), %bufp JMP_NOSPEC bufp From 2ce9a7299bf6332cf32c12cdf360983da56be33b Mon Sep 17 00:00:00 2001 From: Keerthy Date: Mon, 13 Jul 2020 11:34:21 +0300 Subject: [PATCH 117/157] dt-bindings: crypto: Add TI SA2UL crypto accelerator documentation The Security Accelerator Ultra Lite (SA2UL) subsystem provides hardware cryptographic acceleration for the following use cases: * Encryption and authentication for secure boot * Encryption and authentication of content in applications requiring DRM (digital rights management) and content/asset protection SA2UL provides support for number of different cryptographic algorithms including SHA1, SHA256, SHA512, AES, 3DES, and various combinations of the previous for AEAD use. 
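For orientation, a driver consuming this binding would claim the three channels named under "dma-names" roughly as follows. This is a hedged sketch: dma_request_chan() is the standard dmaengine API matching on those names, but the function name and the trimmed error handling are illustrative, not the SA2UL driver's code.

#include <linux/dmaengine.h>
#include <linux/err.h>

static int sa2ul_request_dma_sketch(struct device *dev)
{
	struct dma_chan *tx, *rx1, *rx2;

	tx = dma_request_chan(dev, "tx");	/* command/data submission */
	if (IS_ERR(tx))
		return PTR_ERR(tx);

	rx1 = dma_request_chan(dev, "rx1");	/* first receive flow */
	rx2 = dma_request_chan(dev, "rx2");	/* second receive flow */
	if (IS_ERR(rx1) || IS_ERR(rx2))
		return -ENODEV;	/* real code would release tx here */

	return 0;
}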
Reviewed-by: Rob Herring Signed-off-by: Keerthy [t-kristo@ti.com: converted documentation to yaml] Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- .../devicetree/bindings/crypto/ti,sa2ul.yaml | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml diff --git a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml new file mode 100644 index 000000000000..85ef69ffebed --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/crypto/ti,sa2ul.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: K3 SoC SA2UL crypto module + +maintainers: + - Tero Kristo + +properties: + compatible: + enum: + - ti,j721e-sa2ul + - ti,am654-sa2ul + + reg: + maxItems: 1 + + power-domains: + maxItems: 1 + + dmas: + items: + - description: TX DMA Channel + - description: RX DMA Channel #1 + - description: RX DMA Channel #2 + + dma-names: + items: + - const: tx + - const: rx1 + - const: rx2 + + dma-coherent: true + + "#address-cells": + const: 2 + + "#size-cells": + const: 2 + + ranges: + description: + Address translation for the possible RNG child node for SA2UL + +patternProperties: + "^rng@[a-f0-9]+$": + type: object + description: + Child RNG node for SA2UL + +required: + - compatible + - reg + - power-domains + - dmas + - dma-names + - dma-coherent + +additionalProperties: false + +examples: + - | + #include + + main_crypto: crypto@4e00000 { + compatible = "ti,j721-sa2ul"; + reg = <0x0 0x4e00000 0x0 0x1200>; + power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>; + dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, + <&main_udmap 0x4001>; + dma-names = "tx", "rx1", "rx2"; + dma-coherent; + }; From 7694b6ca649fead1a57046935711bc82dfc78cfb Mon Sep 17 00:00:00 2001 From: Keerthy Date: Mon, 13 Jul 2020 11:34:22 +0300 Subject: [PATCH 118/157] crypto: sa2ul - Add crypto driver Adds a basic crypto driver and currently supports AES/3DES in cbc mode for both encryption and decryption. Signed-off-by: Keerthy [t-kristo@ti.com: major re-work to fix various bugs in the driver and to cleanup the code] Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 14 + drivers/crypto/Makefile | 1 + drivers/crypto/sa2ul.c | 1388 +++++++++++++++++++++++++++++++++++++++ drivers/crypto/sa2ul.h | 380 +++++++++++ 4 files changed, 1783 insertions(+) create mode 100644 drivers/crypto/sa2ul.c create mode 100644 drivers/crypto/sa2ul.h diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 585ad584e421..aa3a4ed07a66 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -866,4 +866,18 @@ source "drivers/crypto/hisilicon/Kconfig" source "drivers/crypto/amlogic/Kconfig" +config CRYPTO_DEV_SA2UL + tristate "Support for TI security accelerator" + depends on ARCH_K3 || COMPILE_TEST + select ARM64_CRYPTO + select CRYPTO_AES + select CRYPTO_AES_ARM64 + select CRYPTO_ALGAPI + select HW_RANDOM + select SG_SPLIT + help + K3 devices include a security accelerator engine that may be + used for crypto offload. Select this if you want to use hardware + acceleration for cryptographic algorithms on these devices. 
+ endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 944ed7226e37..53fc115cf459 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o +obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o obj-$(CONFIG_ARCH_STM32) += stm32/ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c new file mode 100644 index 000000000000..860c7435fefa --- /dev/null +++ b/drivers/crypto/sa2ul.c @@ -0,0 +1,1388 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * K3 SA2UL crypto accelerator driver + * + * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com + * + * Authors: Keerthy + * Vitaly Andrianov + * Tero Kristo + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "sa2ul.h" + +/* Byte offset for key in encryption security context */ +#define SC_ENC_KEY_OFFSET (1 + 27 + 4) +/* Byte offset for Aux-1 in encryption security context */ +#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32) + +#define SA_CMDL_UPD_ENC 0x0001 +#define SA_CMDL_UPD_AUTH 0x0002 +#define SA_CMDL_UPD_ENC_IV 0x0004 +#define SA_CMDL_UPD_AUTH_IV 0x0008 +#define SA_CMDL_UPD_AUX_KEY 0x0010 + +#define SA_AUTH_SUBKEY_LEN 16 +#define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF +#define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000 + +#define MODE_CONTROL_BYTES 27 +#define SA_HASH_PROCESSING 0 +#define SA_CRYPTO_PROCESSING 0 +#define SA_UPLOAD_HASH_TO_TLR BIT(6) + +#define SA_SW0_FLAGS_MASK 0xF0000 +#define SA_SW0_CMDL_INFO_MASK 0x1F00000 +#define SA_SW0_CMDL_PRESENT BIT(4) +#define SA_SW0_ENG_ID_MASK 0x3E000000 +#define SA_SW0_DEST_INFO_PRESENT BIT(30) +#define SA_SW2_EGRESS_LENGTH 0xFF000000 +#define SA_BASIC_HASH 0x10 + +#define SHA256_DIGEST_WORDS 8 +/* Make 32-bit word from 4 bytes */ +#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \ + ((b2) << 8) | (b3)) + +/* size of SCCTL structure in bytes */ +#define SA_SCCTL_SZ 16 + +/* Max Authentication tag size */ +#define SA_MAX_AUTH_TAG_SZ 64 + +#define PRIV_ID 0x1 +#define PRIV 0x1 + +static struct device *sa_k3_dev; + +/** + * struct sa_cmdl_cfg - Command label configuration descriptor + * @enc_eng_id: Encryption Engine ID supported by the SA hardware + * @iv_size: Initialization Vector size + */ +struct sa_cmdl_cfg { + u8 enc_eng_id; + u8 iv_size; +}; + +/** + * struct algo_data - Crypto algorithm specific data + * @enc_eng: Encryption engine info structure + * @iv_idx: iv index in psdata + * @iv_out_size: iv out size + * @ealg_id: Encryption Algorithm ID + * @mci_enc: Mode Control Instruction for Encryption algorithm + * @mci_dec: Mode Control Instruction for Decryption + * @inv_key: Whether the encryption algorithm demands key inversion + * @ctx: Pointer to the algorithm context + */ +struct algo_data { + struct sa_eng_info enc_eng; + u8 iv_idx; + u8 iv_out_size; + u8 ealg_id; + u8 *mci_enc; + u8 *mci_dec; + bool inv_key; + struct sa_tfm_ctx *ctx; +}; + +/** + * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms + * @type: Type of the crypto algorithm. + * @alg: Union of crypto algorithm definitions. 
+ * @registered: Flag indicating if the crypto algorithm is already registered + */ +struct sa_alg_tmpl { + u32 type; /* CRYPTO_ALG_TYPE from */ + union { + struct skcipher_alg skcipher; + } alg; + bool registered; +}; + +/** + * struct sa_rx_data: RX Packet miscellaneous data place holder + * @req: crypto request data pointer + * @ddev: pointer to the DMA device + * @tx_in: dma_async_tx_descriptor pointer for rx channel + * @split_src_sg: Set if the src sg is split and needs to be freed up + * @split_dst_sg: Set if the dst sg is split and needs to be freed up + * @enc: Flag indicating either encryption or decryption + * @enc_iv_size: Initialisation vector size + * @iv_idx: Initialisation vector index + * @rx_sg: Static scatterlist entry for overriding RX data + * @tx_sg: Static scatterlist entry for overriding TX data + * @src: Source data pointer + * @dst: Destination data pointer + */ +struct sa_rx_data { + void *req; + struct device *ddev; + struct dma_async_tx_descriptor *tx_in; + struct scatterlist *split_src_sg; + struct scatterlist *split_dst_sg; + u8 enc; + u8 enc_iv_size; + u8 iv_idx; + struct scatterlist rx_sg; + struct scatterlist tx_sg; + struct scatterlist *src; + struct scatterlist *dst; +}; + +/** + * struct sa_req: SA request definition + * @dev: device for the request + * @size: total data to the xmitted via DMA + * @enc_offset: offset of cipher data + * @enc_size: data to be passed to cipher engine + * @enc_iv: cipher IV + * @type: algorithm type for the request + * @cmdl: command label pointer + * @base: pointer to the base request + * @ctx: pointer to the algorithm context data + * @enc: true if this is an encode request + * @src: source data + * @dst: destination data + * @callback: DMA callback for the request + * @mdata_size: metadata size passed to DMA + */ +struct sa_req { + struct device *dev; + u16 size; + u8 enc_offset; + u16 enc_size; + u8 *enc_iv; + u32 type; + u32 *cmdl; + struct crypto_async_request *base; + struct sa_tfm_ctx *ctx; + bool enc; + struct scatterlist *src; + struct scatterlist *dst; + dma_async_tx_callback callback; + u16 mdata_size; +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for encryption + */ +static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = { + { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for decryption + */ +static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = { + { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * 
Mode Control Instructions for various Key lengths 128, 192, 256 + * For ECB (Electronic Code Book) mode for encryption + */ +static u8 mci_ecb_enc_array[3][27] = { + { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For ECB (Electronic Code Book) mode for decryption + */ +static u8 mci_ecb_dec_array[3][27] = { + { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for DES algorithm + * For CBC (Cipher Block Chaining) mode and ECB mode + * encryption and for decryption respectively + */ +static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = { + 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = { + 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = { + 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = { + 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +/* + * Perform 16 byte or 128 bit swizzling + * The SA2UL Expects the security context to + * be in little Endian and the bus width is 128 bits or 16 bytes + * Hence swap 16 bytes at a time from higher to lower address + */ +static void sa_swiz_128(u8 *in, u16 len) +{ + u8 data[16]; + int i, j; + + for (i = 0; i < len; i += 16) { + memcpy(data, &in[i], 16); + for (j = 0; j < 16; j++) + in[i + j] = data[15 - j]; + } +} + +/* Derive the inverse key used in AES-CBC decryption operation */ +static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz) +{ + struct crypto_aes_ctx ctx; + int key_pos; + + if (aes_expandkey(&ctx, key, key_sz)) { + dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz); + return -EINVAL; + } + + /* work around to get the right inverse for AES_KEYSIZE_192 size keys */ + if (key_sz == AES_KEYSIZE_192) { + ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46]; + ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47]; + } + + /* Based crypto_aes_expand_key logic */ + switch (key_sz) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_192: + 
key_pos = key_sz + 24; + break; + + case AES_KEYSIZE_256: + key_pos = key_sz + 24 - 4; + break; + + default: + dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz); + return -EINVAL; + } + + memcpy(inv_key, &ctx.key_enc[key_pos], key_sz); + return 0; +} + +/* Set Security context for the encryption engine */ +static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz, + u8 enc, u8 *sc_buf) +{ + const u8 *mci = NULL; + + /* Set Encryption mode selector to crypto processing */ + sc_buf[0] = SA_CRYPTO_PROCESSING; + + if (enc) + mci = ad->mci_enc; + else + mci = ad->mci_dec; + /* Set the mode control instructions in security context */ + if (mci) + memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES); + + /* For AES-CBC decryption get the inverse key */ + if (ad->inv_key && !enc) { + if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz)) + return -EINVAL; + /* For all other cases: key is used */ + } else { + memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz); + } + + return 0; +} + +static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16) +{ + int j; + + for (j = 0; j < ((size16) ? 4 : 2); j++) { + *out = cpu_to_be32(*((u32 *)iv)); + iv += 4; + out++; + } +} + +/* Format general command label */ +static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl, + struct sa_cmdl_upd_info *upd_info) +{ + u8 enc_offset = 0, total = 0; + u8 enc_next_eng = SA_ENG_ID_OUTPORT2; + u32 *word_ptr = (u32 *)cmdl; + int i; + + /* Clear the command label */ + memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32))); + + /* Iniialize the command update structure */ + memzero_explicit(upd_info, sizeof(*upd_info)); + + if (cfg->enc_eng_id != SA_ENG_ID_NONE) + total = SA_CMDL_HEADER_SIZE_BYTES; + + if (cfg->iv_size) + total += cfg->iv_size; + + enc_next_eng = SA_ENG_ID_OUTPORT2; + + if (cfg->enc_eng_id != SA_ENG_ID_NONE) { + upd_info->flags |= SA_CMDL_UPD_ENC; + upd_info->enc_size.index = enc_offset >> 2; + upd_info->enc_offset.index = upd_info->enc_size.index + 1; + /* Encryption command label */ + cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng; + + /* Encryption modes requiring IV */ + if (cfg->iv_size) { + upd_info->flags |= SA_CMDL_UPD_ENC_IV; + upd_info->enc_iv.index = + (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2; + upd_info->enc_iv.size = cfg->iv_size; + + cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] = + SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size; + + cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] = + (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3)); + enc_offset += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size; + } else { + cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] = + SA_CMDL_HEADER_SIZE_BYTES; + enc_offset += SA_CMDL_HEADER_SIZE_BYTES; + } + } + + total = roundup(total, 8); + + for (i = 0; i < total / 4; i++) + word_ptr[i] = swab32(word_ptr[i]); + + return total; +} + +/* Update Command label */ +static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl, + struct sa_cmdl_upd_info *upd_info) +{ + int i = 0, j; + + if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) { + cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK; + cmdl[upd_info->enc_size.index] |= req->enc_size; + cmdl[upd_info->enc_offset.index] &= + ~SA_CMDL_SOP_BYPASS_LEN_MASK; + cmdl[upd_info->enc_offset.index] |= + ((u32)req->enc_offset << + __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK)); + + if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) { + __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index]; + u32 *enc_iv = (u32 *)req->enc_iv; + + for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) { + data[j] = 
cpu_to_be32(*enc_iv); + enc_iv++; + } + } + } +} + +/* Format SWINFO words to be sent to SA */ +static +void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys, + u8 cmdl_present, u8 cmdl_offset, u8 flags, + u8 hash_size, u32 *swinfo) +{ + swinfo[0] = sc_id; + swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK)); + if (likely(cmdl_present)) + swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) << + __ffs(SA_SW0_CMDL_INFO_MASK)); + swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK)); + + swinfo[0] |= SA_SW0_DEST_INFO_PRESENT; + swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL); + swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32); + swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH)); +} + +/* Dump the security context */ +static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr) +{ +#ifdef DEBUG + dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr); + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, + 16, 1, buf, SA_CTX_MAX_SZ, false); +#endif +} + +static +int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key, + u16 enc_key_sz, struct algo_data *ad, u8 enc, u32 *swinfo) +{ + int enc_sc_offset = 0; + u8 *sc_buf = ctx->sc; + u16 sc_id = ctx->sc_id; + u8 first_engine; + + memzero_explicit(sc_buf, SA_CTX_MAX_SZ); + + enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; + + /* SCCTL Owner info: 0=host, 1=CP_ACE */ + sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0; + /* SCCTL F/E control */ + sc_buf[1] = SA_SCCTL_FE_ENC; + memcpy(&sc_buf[2], &sc_id, 2); + sc_buf[4] = 0x0; + sc_buf[5] = PRIV_ID; + sc_buf[6] = PRIV; + sc_buf[7] = 0x0; + + /* Prepare context for encryption engine */ + if (ad->enc_eng.sc_size) { + if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc, + &sc_buf[enc_sc_offset])) + return -EINVAL; + } + + /* Set the ownership of context to CP_ACE */ + sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80; + + /* swizzle the security context */ + sa_swiz_128(sc_buf, SA_CTX_MAX_SZ); + /* Setup SWINFO */ + first_engine = ad->enc_eng.eng_id; + + sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0, + SA_SW_INFO_FLAG_EVICT, ad->iv_out_size, swinfo); + + sa_dump_sc(sc_buf, ctx->sc_phys); + + return 0; +} + +/* Free the per direction context memory */ +static void sa_free_ctx_info(struct sa_ctx_info *ctx, + struct sa_crypto_data *data) +{ + unsigned long bn; + + bn = ctx->sc_id - data->sc_id_start; + spin_lock(&data->scid_lock); + __clear_bit(bn, data->ctx_bm); + data->sc_id--; + spin_unlock(&data->scid_lock); + + if (ctx->sc) { + dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys); + ctx->sc = NULL; + } +} + +static int sa_init_ctx_info(struct sa_ctx_info *ctx, + struct sa_crypto_data *data) +{ + unsigned long bn; + int err; + + spin_lock(&data->scid_lock); + bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX); + __set_bit(bn, data->ctx_bm); + data->sc_id++; + spin_unlock(&data->scid_lock); + + ctx->sc_id = (u16)(data->sc_id_start + bn); + + ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys); + if (!ctx->sc) { + dev_err(&data->pdev->dev, "Failed to allocate SC memory\n"); + err = -ENOMEM; + goto scid_rollback; + } + + return 0; + +scid_rollback: + spin_lock(&data->scid_lock); + __clear_bit(bn, data->ctx_bm); + data->sc_id--; + spin_unlock(&data->scid_lock); + + return err; +} + +static void sa_cipher_cra_exit(struct crypto_skcipher *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, 
&ctx->dec.sc_phys); + + sa_free_ctx_info(&ctx->enc, data); + sa_free_ctx_info(&ctx->dec, data); + + crypto_free_sync_skcipher(ctx->fallback.skcipher); +} + +static int sa_cipher_cra_init(struct crypto_skcipher *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + const char *name = crypto_tfm_alg_name(&tfm->base); + int ret; + + memzero_explicit(ctx, sizeof(*ctx)); + ctx->dev_data = data; + + ret = sa_init_ctx_info(&ctx->enc, data); + if (ret) + return ret; + ret = sa_init_ctx_info(&ctx->dec, data); + if (ret) { + sa_free_ctx_info(&ctx->enc, data); + return ret; + } + + ctx->fallback.skcipher = + crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(ctx->fallback.skcipher)) { + dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name); + return PTR_ERR(ctx->fallback.skcipher); + } + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + return 0; +} + +static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen, struct algo_data *ad) +{ + struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int cmdl_len; + struct sa_cmdl_cfg cfg; + int ret; + + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) + return -EINVAL; + + ad->enc_eng.eng_id = SA_ENG_ID_EM1; + ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ; + + memzero_explicit(&cfg, sizeof(cfg)); + cfg.enc_eng_id = ad->enc_eng.eng_id; + cfg.iv_size = crypto_skcipher_ivsize(tfm); + + crypto_sync_skcipher_clear_flags(ctx->fallback.skcipher, + CRYPTO_TFM_REQ_MASK); + crypto_sync_skcipher_set_flags(ctx->fallback.skcipher, + tfm->base.crt_flags & + CRYPTO_TFM_REQ_MASK); + ret = crypto_sync_skcipher_setkey(ctx->fallback.skcipher, key, keylen); + if (ret) + return ret; + + /* Setup Encryption Security Context & Command label template */ + if (sa_init_sc(&ctx->enc, key, keylen, ad, 1, &ctx->enc.epib[1])) + goto badkey; + + cmdl_len = sa_format_cmdl_gen(&cfg, + (u8 *)ctx->enc.cmdl, + &ctx->enc.cmdl_upd_info); + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + goto badkey; + + ctx->enc.cmdl_size = cmdl_len; + + /* Setup Decryption Security Context & Command label template */ + if (sa_init_sc(&ctx->dec, key, keylen, ad, 0, &ctx->dec.epib[1])) + goto badkey; + + cfg.enc_eng_id = ad->enc_eng.eng_id; + cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl, + &ctx->dec.cmdl_upd_info); + + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + goto badkey; + + ctx->dec.cmdl_size = cmdl_len; + ctx->iv_idx = ad->iv_idx; + + return 0; + +badkey: + dev_err(sa_k3_dev, "%s: badkey\n", __func__); + return -EINVAL; +} + +static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + /* Convert the key size (16/24/32) to the key size index (0/1/2) */ + int key_idx = (keylen >> 3) - 2; + + if (key_idx >= 3) + return -EINVAL; + + ad.mci_enc = mci_cbc_enc_array[key_idx]; + ad.mci_dec = mci_cbc_dec_array[key_idx]; + ad.inv_key = true; + ad.ealg_id = SA_EALG_ID_AES_CBC; + ad.iv_idx = 4; + ad.iv_out_size = 16; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + /* Convert the key size (16/24/32) to the key size index (0/1/2) */ + int key_idx = (keylen >> 3) - 2; + + 
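+	/*
+	 * (keylen >> 3) - 2 maps the legal AES key sizes onto the rows of
+	 * the mode-control tables: 16 -> 0, 24 -> 1, 32 -> 2. Anything
+	 * larger is rejected by the bounds check below, and
+	 * sa_cipher_setkey() additionally validates the exact key length.
+	 */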
if (key_idx >= 3) + return -EINVAL; + + ad.mci_enc = mci_ecb_enc_array[key_idx]; + ad.mci_dec = mci_ecb_dec_array[key_idx]; + ad.inv_key = true; + ad.ealg_id = SA_EALG_ID_AES_ECB; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.mci_enc = mci_cbc_3des_enc_array; + ad.mci_dec = mci_cbc_3des_dec_array; + ad.ealg_id = SA_EALG_ID_3DES_CBC; + ad.iv_idx = 6; + ad.iv_out_size = 8; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.mci_enc = mci_ecb_3des_enc_array; + ad.mci_dec = mci_ecb_3des_dec_array; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static void sa_aes_dma_in_callback(void *data) +{ + struct sa_rx_data *rxd = (struct sa_rx_data *)data; + struct skcipher_request *req; + int sglen; + u32 *result; + __be32 *mdptr; + size_t ml, pl; + int i; + enum dma_data_direction dir_src; + bool diff_dst; + + req = container_of(rxd->req, struct skcipher_request, base); + sglen = sg_nents_for_len(req->src, req->cryptlen); + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + + if (req->iv) { + mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, + &ml); + result = (u32 *)req->iv; + + for (i = 0; i < (rxd->enc_iv_size / 4); i++) + result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]); + } + + dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src); + kfree(rxd->split_src_sg); + + if (diff_dst) { + sglen = sg_nents_for_len(req->dst, req->cryptlen); + + dma_unmap_sg(rxd->ddev, req->dst, sglen, + DMA_FROM_DEVICE); + kfree(rxd->split_dst_sg); + } + + kfree(rxd); + + skcipher_request_complete(req, 0); +} + +static void +sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib) +{ + u32 *out, *in; + int i; + + for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++) + *out++ = *in++; + + mdptr[4] = (0xFFFF << 16); + for (out = &mdptr[5], in = psdata, i = 0; + i < pslen / sizeof(u32); i++) + *out++ = *in++; +} + +static int sa_run(struct sa_req *req) +{ + struct sa_rx_data *rxd; + gfp_t gfp_flags; + u32 cmdl[SA_MAX_CMDL_WORDS]; + struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev); + struct device *ddev; + struct dma_chan *dma_rx; + int sg_nents, src_nents, dst_nents; + int mapped_src_nents, mapped_dst_nents; + struct scatterlist *src, *dst; + size_t pl, ml, split_size; + struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec; + int ret; + struct dma_async_tx_descriptor *tx_out; + u32 *mdptr; + bool diff_dst; + enum dma_data_direction dir_src; + + gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC; + + rxd = kzalloc(sizeof(*rxd), gfp_flags); + if (!rxd) + return -ENOMEM; + + if (req->src != req->dst) { + diff_dst = true; + dir_src = DMA_TO_DEVICE; + } else { + diff_dst = false; + dir_src = DMA_BIDIRECTIONAL; + } + + /* + * SA2UL has an interesting feature where the receive DMA channel + * is selected based on the data passed to the engine. Within the + * transition range, there is also a space where it is impossible + * to determine where the data will end up, and this should be + * avoided. This will be handled by the SW fallback mechanism by + * the individual algorithm implementations. 
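+ * For reference, the resulting selection below is: sizes under 240 bytes
+ * use dma_rx1, sizes over 256 bytes use dma_rx2, and the 240..256 byte
+ * window in between never reaches this code because sa_cipher_run() has
+ * already diverted it to the software fallback.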
+ */ + if (req->size >= 256) + dma_rx = pdata->dma_rx2; + else + dma_rx = pdata->dma_rx1; + + ddev = dma_rx->device->dev; + + memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size); + + sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info); + + if (req->type != CRYPTO_ALG_TYPE_AHASH) { + if (req->enc) + req->type |= + (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT); + else + req->type |= + (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT); + } + + cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type; + + /* + * Map the packets, first we check if the data fits into a single + * sg entry and use that if possible. If it does not fit, we check + * if we need to do sg_split to align the scatterlist data on the + * actual data size being processed by the crypto engine. + */ + src = req->src; + sg_nents = sg_nents_for_len(src, req->size); + + split_size = req->size; + + if (sg_nents == 1 && split_size <= req->src->length) { + src = &rxd->rx_sg; + sg_init_table(src, 1); + sg_set_page(src, sg_page(req->src), split_size, + req->src->offset); + src_nents = 1; + dma_map_sg(ddev, src, sg_nents, dir_src); + } else { + mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents, + dir_src); + ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size, + &src, &src_nents, gfp_flags); + if (ret) { + src_nents = sg_nents; + src = req->src; + } else { + rxd->split_src_sg = src; + } + } + + if (!diff_dst) { + dst_nents = src_nents; + dst = src; + } else { + dst_nents = sg_nents_for_len(req->dst, req->size); + + if (dst_nents == 1 && split_size <= req->dst->length) { + dst = &rxd->tx_sg; + sg_init_table(dst, 1); + sg_set_page(dst, sg_page(req->dst), split_size, + req->dst->offset); + dst_nents = 1; + dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE); + } else { + mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents, + DMA_FROM_DEVICE); + ret = sg_split(req->dst, mapped_dst_nents, 0, 1, + &split_size, &dst, &dst_nents, + gfp_flags); + if (ret) { + dst_nents = dst_nents; + dst = req->dst; + } else { + rxd->split_dst_sg = dst; + } + } + } + + if (unlikely(src_nents != sg_nents)) { + dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n"); + ret = -EIO; + goto err_cleanup; + } + + rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxd->tx_in) { + dev_err(pdata->dev, "IN prep_slave_sg() failed\n"); + ret = -EINVAL; + goto err_cleanup; + } + + rxd->req = (void *)req->base; + rxd->enc = req->enc; + rxd->ddev = ddev; + rxd->src = src; + rxd->dst = dst; + rxd->iv_idx = req->ctx->iv_idx; + rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size; + rxd->tx_in->callback = req->callback; + rxd->tx_in->callback_param = rxd; + + tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src, + src_nents, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + + if (!tx_out) { + dev_err(pdata->dev, "OUT prep_slave_sg() failed\n"); + ret = -EINVAL; + goto err_cleanup; + } + + /* + * Prepare metadata for DMA engine. This essentially describes the + * crypto algorithm to be used, data sizes, different keys etc. 
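+ * As a reference for the layout produced by sa_prepare_tx_desc() above:
+ * metadata words 0..3 carry the EPIB (including the SWINFO words built
+ * at setkey time), word 4 carries the 0xFFFF0000 marker, and words 5
+ * onwards carry the command label plus the SA_PSDATA_CTX_WORDS of
+ * request context (the request type goes into the first of them).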
+ */ + mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml); + + sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * + sizeof(u32))), cmdl, sizeof(sa_ctx->epib), + sa_ctx->epib); + + ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32)); + dmaengine_desc_set_metadata_len(tx_out, req->mdata_size); + + dmaengine_submit(tx_out); + dmaengine_submit(rxd->tx_in); + + dma_async_issue_pending(dma_rx); + dma_async_issue_pending(pdata->dma_tx); + + return -EINPROGRESS; + +err_cleanup: + dma_unmap_sg(ddev, req->src, sg_nents, DMA_TO_DEVICE); + kfree(rxd->split_src_sg); + + if (req->src != req->dst) { + dst_nents = sg_nents_for_len(req->dst, req->size); + dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE); + kfree(rxd->split_dst_sg); + } + + kfree(rxd); + + return ret; +} + +static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc) +{ + struct sa_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct crypto_alg *alg = req->base.tfm->__crt_alg; + struct sa_req sa_req = { 0 }; + int ret; + + if (!req->cryptlen) + return 0; + + if (req->cryptlen % alg->cra_blocksize) + return -EINVAL; + + /* Use SW fallback if the data size is not supported */ + if (req->cryptlen > SA_MAX_DATA_SZ || + (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN && + req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) { + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher); + + skcipher_request_set_sync_tfm(subreq, ctx->fallback.skcipher); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + if (enc) + ret = crypto_skcipher_encrypt(subreq); + else + ret = crypto_skcipher_decrypt(subreq); + + skcipher_request_zero(subreq); + return ret; + } + + sa_req.size = req->cryptlen; + sa_req.enc_size = req->cryptlen; + sa_req.src = req->src; + sa_req.dst = req->dst; + sa_req.enc_iv = iv; + sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER; + sa_req.enc = enc; + sa_req.callback = sa_aes_dma_in_callback; + sa_req.mdata_size = 44; + sa_req.base = &req->base; + sa_req.ctx = ctx; + + return sa_run(&sa_req); +} + +static int sa_encrypt(struct skcipher_request *req) +{ + return sa_cipher_run(req, req->iv, 1); +} + +static int sa_decrypt(struct skcipher_request *req) +{ + return sa_cipher_run(req, req->iv, 0); +} + +static struct sa_alg_tmpl sa_algs[] = { + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = sa_aes_cbc_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = 
AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = sa_aes_ecb_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "cbc(des3_ede)", + .base.cra_driver_name = "cbc-des3-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = 3 * DES_KEY_SIZE, + .max_keysize = 3 * DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = sa_3des_cbc_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "ecb(des3_ede)", + .base.cra_driver_name = "ecb-des3-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = 3 * DES_KEY_SIZE, + .max_keysize = 3 * DES_KEY_SIZE, + .setkey = sa_3des_ecb_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, +}; + +/* Register the algorithms in crypto framework */ +static void sa_register_algos(const struct device *dev) +{ + char *alg_name; + u32 type; + int i, err; + + for (i = 0; i < ARRAY_SIZE(sa_algs); i++) { + type = sa_algs[i].type; + if (type == CRYPTO_ALG_TYPE_SKCIPHER) { + alg_name = sa_algs[i].alg.skcipher.base.cra_name; + err = crypto_register_skcipher(&sa_algs[i].alg.skcipher); + } else { + dev_err(dev, + "un-supported crypto algorithm (%d)", + sa_algs[i].type); + continue; + } + + if (err) + dev_err(dev, "Failed to register '%s'\n", alg_name); + else + sa_algs[i].registered = true; + } +} + +/* Unregister the algorithms in crypto framework */ +static void sa_unregister_algos(const struct device *dev) +{ + u32 type; + int i; + + for (i = 0; i < ARRAY_SIZE(sa_algs); i++) { + type = sa_algs[i].type; + if (!sa_algs[i].registered) + continue; + if (type == CRYPTO_ALG_TYPE_SKCIPHER) + crypto_unregister_skcipher(&sa_algs[i].alg.skcipher); + + sa_algs[i].registered = false; + } +} + +static int sa_init_mem(struct sa_crypto_data *dev_data) +{ + struct device *dev = &dev_data->pdev->dev; + /* Setup dma pool for security context buffers */ + dev_data->sc_pool = dma_pool_create("keystone-sc", dev, + SA_CTX_MAX_SZ, 64, 0); + if (!dev_data->sc_pool) { + dev_err(dev, "Failed to create dma pool"); + return -ENOMEM; + } + + return 0; +} + +static int sa_dma_init(struct sa_crypto_data *dd) +{ + int ret; + struct dma_slave_config cfg; + + dd->dma_rx1 = NULL; + dd->dma_tx = NULL; + dd->dma_rx2 = NULL; + + ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48)); + if (ret) + return ret; + + dd->dma_rx1 = dma_request_chan(dd->dev, "rx1"); + if (IS_ERR(dd->dma_rx1)) { + if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER) + dev_err(dd->dev, "Unable to request rx1 DMA channel\n"); + return PTR_ERR(dd->dma_rx1); + } + + dd->dma_rx2 = dma_request_chan(dd->dev, "rx2"); + if (IS_ERR(dd->dma_rx2)) { + dma_release_channel(dd->dma_rx1); + if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER) + dev_err(dd->dev, "Unable to request rx2 DMA channel\n"); + return PTR_ERR(dd->dma_rx2); + } + + dd->dma_tx 
= dma_request_chan(dd->dev, "tx"); + if (IS_ERR(dd->dma_tx)) { + if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER) + dev_err(dd->dev, "Unable to request tx DMA channel\n"); + ret = PTR_ERR(dd->dma_tx); + goto err_dma_tx; + } + + memzero_explicit(&cfg, sizeof(cfg)); + + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = 4; + cfg.dst_maxburst = 4; + + ret = dmaengine_slave_config(dd->dma_rx1, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n", + ret); + return ret; + } + + ret = dmaengine_slave_config(dd->dma_rx2, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n", + ret); + return ret; + } + + ret = dmaengine_slave_config(dd->dma_tx, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n", + ret); + return ret; + } + + return 0; + +err_dma_tx: + dma_release_channel(dd->dma_rx1); + dma_release_channel(dd->dma_rx2); + + return ret; +} + +static int sa_ul_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + static void __iomem *saul_base; + struct sa_crypto_data *dev_data; + u32 val; + int ret; + + dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL); + if (!dev_data) + return -ENOMEM; + + sa_k3_dev = dev; + dev_data->dev = dev; + dev_data->pdev = pdev; + platform_set_drvdata(pdev, dev_data); + dev_set_drvdata(sa_k3_dev, dev_data); + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret) { + dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__, + ret); + return ret; + } + + sa_init_mem(dev_data); + ret = sa_dma_init(dev_data); + if (ret) + goto disable_pm_runtime; + + spin_lock_init(&dev_data->scid_lock); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + saul_base = devm_ioremap_resource(dev, res); + + dev_data->base = saul_base; + val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN | + SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN | + SA_EEC_TRNG_EN; + + writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL); + + sa_register_algos(dev); + + ret = of_platform_populate(node, NULL, NULL, &pdev->dev); + if (ret) + goto release_dma; + + return 0; + +release_dma: + sa_unregister_algos(&pdev->dev); + + dma_release_channel(dev_data->dma_rx2); + dma_release_channel(dev_data->dma_rx1); + dma_release_channel(dev_data->dma_tx); + + dma_pool_destroy(dev_data->sc_pool); + +disable_pm_runtime: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static int sa_ul_remove(struct platform_device *pdev) +{ + struct sa_crypto_data *dev_data = platform_get_drvdata(pdev); + + sa_unregister_algos(&pdev->dev); + + dma_release_channel(dev_data->dma_rx2); + dma_release_channel(dev_data->dma_rx1); + dma_release_channel(dev_data->dma_tx); + + dma_pool_destroy(dev_data->sc_pool); + + platform_set_drvdata(pdev, NULL); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id of_match[] = { + {.compatible = "ti,j721e-sa2ul",}, + {.compatible = "ti,am654-sa2ul",}, + {}, +}; +MODULE_DEVICE_TABLE(of, of_match); + +static struct platform_driver sa_ul_driver = { + .probe = sa_ul_probe, + .remove = sa_ul_remove, + .driver = { + .name = "saul-crypto", + .of_match_table = of_match, + }, +}; +module_platform_driver(sa_ul_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h new file mode 100644 
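Before the new header file, an aside on usage: the ciphers registered above are reached through the generic kernel skcipher API rather than called directly. A minimal, hedged sketch of a caller (the demo function name and buffer size are invented for illustration; error handling is trimmed):

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static int sa2ul_cbc_aes_demo(void)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		DECLARE_CRYPTO_WAIT(wait);
		struct scatterlist sg;
		u8 key[16] = {}, iv[16] = {};
		u8 *buf = kzalloc(64, GFP_KERNEL);
		int ret;

		/* picks the highest-priority cbc(aes), e.g. cbc-aes-sa2ul */
		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		crypto_skcipher_setkey(tfm, key, sizeof(key));
		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		sg_init_one(&sg, buf, 64);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, 64, iv);

		/* completion arrives from the driver's DMA callback path */
		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
		crypto_free_skcipher(tfm);
		kfree(buf);
		return ret;
	}

The 64-byte length is deliberate: it is block-aligned and outside the 240..256 byte window, so the request stays on the hardware path.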
index 000000000000..45ba86cb5d11
--- /dev/null
+++ b/drivers/crypto/sa2ul.h
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * K3 SA2UL crypto accelerator driver
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors:	Keerthy
+ *		Vitaly Andrianov
+ *		Tero Kristo
+ */
+
+#ifndef _K3_SA2UL_
+#define _K3_SA2UL_
+
+#include
+#include
+#include
+#include
+
+#define SA_ENGINE_ENABLE_CONTROL	0x1000
+
+struct sa_tfm_ctx;
+/*
+ * SA_ENGINE_ENABLE_CONTROL register bits
+ */
+#define SA_EEC_ENCSS_EN			0x00000001
+#define SA_EEC_AUTHSS_EN		0x00000002
+#define SA_EEC_TRNG_EN			0x00000008
+#define SA_EEC_PKA_EN			0x00000010
+#define SA_EEC_CTXCACH_EN		0x00000080
+#define SA_EEC_CPPI_PORT_IN_EN		0x00000200
+#define SA_EEC_CPPI_PORT_OUT_EN		0x00000800
+
+/*
+ * Encoding used to identify the type of crypto operation
+ * performed on the packet when the packet is returned
+ * by SA
+ */
+#define SA_REQ_SUBTYPE_ENC	0x0001
+#define SA_REQ_SUBTYPE_DEC	0x0002
+#define SA_REQ_SUBTYPE_SHIFT	16
+#define SA_REQ_SUBTYPE_MASK	0xffff
+
+/* Number of 32 bit words in EPIB */
+#define SA_DMA_NUM_EPIB_WORDS	4
+
+/* Number of 32 bit words in PS data */
+#define SA_DMA_NUM_PS_WORDS	16
+#define NKEY_SZ			3
+#define MCI_SZ			27
+
+/*
+ * Maximum number of simultaneous security contexts
+ * supported by the driver
+ */
+#define SA_MAX_NUM_CTX	512
+
+/*
+ * Assumption: CTX size is multiple of 32
+ */
+#define SA_CTX_SIZE_TO_DMA_SIZE(ctx_sz) \
+		((ctx_sz) ? ((ctx_sz) / 32 - 1) : 0)
+
+#define SA_CTX_ENC_KEY_OFFSET	32
+#define SA_CTX_ENC_AUX1_OFFSET	64
+#define SA_CTX_ENC_AUX2_OFFSET	96
+#define SA_CTX_ENC_AUX3_OFFSET	112
+#define SA_CTX_ENC_AUX4_OFFSET	128
+
+/* Next Engine Select code in CP_ACE */
+#define SA_ENG_ID_EM1	2	/* Enc/Dec engine with AES/DES core */
+#define SA_ENG_ID_EM2	3	/* Encryption/Decryption engine for pass 2 */
+#define SA_ENG_ID_AM1	4	/* Auth. engine with SHA1/MD5/SHA2 core */
+#define SA_ENG_ID_AM2	5	/* Authentication engine for pass 2 */
+#define SA_ENG_ID_OUTPORT2 20	/* Egress module 2 */
+#define SA_ENG_ID_NONE	0xff
+
+/*
+ * Command Label Definitions
+ */
+#define SA_CMDL_OFFSET_NESC		0	/* Next Engine Select Code */
+#define SA_CMDL_OFFSET_LABEL_LEN	1	/* Engine Command Label Length */
+/* 16-bit Length of Data to be processed */
+#define SA_CMDL_OFFSET_DATA_LEN		2
+#define SA_CMDL_OFFSET_DATA_OFFSET	4	/* Start Data Offset */
+#define SA_CMDL_OFFSET_OPTION_CTRL1	5	/* Option Control Byte 1 */
+#define SA_CMDL_OFFSET_OPTION_CTRL2	6	/* Option Control Byte 2 */
+#define SA_CMDL_OFFSET_OPTION_CTRL3	7	/* Option Control Byte 3 */
+#define SA_CMDL_OFFSET_OPTION_BYTE	8
+
+#define SA_CMDL_HEADER_SIZE_BYTES	8
+
+#define SA_CMDL_OPTION_BYTES_MAX_SIZE	72
+#define SA_CMDL_MAX_SIZE_BYTES (SA_CMDL_HEADER_SIZE_BYTES + \
+				SA_CMDL_OPTION_BYTES_MAX_SIZE)
+
+/* SWINFO word-0 flags */
+#define SA_SW_INFO_FLAG_EVICT	0x0001
+#define SA_SW_INFO_FLAG_TEAR	0x0002
+#define SA_SW_INFO_FLAG_NOPD	0x0004
+
+/*
+ * This type represents the various packet types to be processed
+ * by the PHP engine in SA.
+ * It is used to identify the corresponding PHP processing function.
+ */
+#define SA_CTX_PE_PKT_TYPE_3GPP_AIR	0	/* 3GPP Air Cipher */
+#define SA_CTX_PE_PKT_TYPE_SRTP		1	/* SRTP */
+#define SA_CTX_PE_PKT_TYPE_IPSEC_AH	2	/* IPSec Authentication Header */
+/* IPSec Encapsulating Security Payload */
+#define SA_CTX_PE_PKT_TYPE_IPSEC_ESP	3
+/* Indicates that it is in data mode, it may not be used by PHP */
+#define SA_CTX_PE_PKT_TYPE_NONE		4
+#define SA_CTX_ENC_TYPE1_SZ	64	/* Encryption SC with Key only */
+#define SA_CTX_ENC_TYPE2_SZ	96	/* Encryption SC with Key and Aux1 */
+
+#define SA_CTX_AUTH_TYPE1_SZ	64	/* Auth SC with Key only */
+#define SA_CTX_AUTH_TYPE2_SZ	96	/* Auth SC with Key and Aux1 */
+/* Size of security context for PHP engine */
+#define SA_CTX_PHP_PE_CTX_SZ	64
+
+#define SA_CTX_MAX_SZ (64 + SA_CTX_ENC_TYPE2_SZ + SA_CTX_AUTH_TYPE2_SZ)
+
+/*
+ * Encoding of F/E control in SCCTL
+ *  Bit 0-1: Fetch PHP Bytes
+ *  Bit 2-3: Fetch Encryption/Air Ciphering Bytes
+ *  Bit 4-5: Fetch Authentication Bytes or Encr pass 2
+ *  Bit 6-7: Evict PHP Bytes
+ *
+ * where   00 = 0 bytes
+ *         01 = 64 bytes
+ *         10 = 96 bytes
+ *         11 = 128 bytes
+ */
+#define SA_CTX_DMA_SIZE_0	0
+#define SA_CTX_DMA_SIZE_64	1
+#define SA_CTX_DMA_SIZE_96	2
+#define SA_CTX_DMA_SIZE_128	3
+
+/*
+ * Byte offset of the owner word in SCCTL
+ * in the security context
+ */
+#define SA_CTX_SCCTL_OWNER_OFFSET	0
+
+#define SA_CTX_ENC_KEY_OFFSET	32
+#define SA_CTX_ENC_AUX1_OFFSET	64
+#define SA_CTX_ENC_AUX2_OFFSET	96
+#define SA_CTX_ENC_AUX3_OFFSET	112
+#define SA_CTX_ENC_AUX4_OFFSET	128
+
+#define SA_SCCTL_FE_AUTH_ENC	0x65
+#define SA_SCCTL_FE_ENC		0x8D
+
+#define SA_ALIGN_MASK		(sizeof(u32) - 1)
+#define SA_ALIGNED		__aligned(32)
+
+/* SA2UL can only handle maximum data size of 64KB */
+#define SA_MAX_DATA_SZ		U16_MAX
+
+/*
+ * SA2UL can provide unpredictable results with packet sizes that fall
+ * within the following range, so avoid using it.
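+ * The window is inclusive on both ends: the fallback checks in
+ * sa_cipher_run() use >= SA_UNSAFE_DATA_SZ_MIN and
+ * <= SA_UNSAFE_DATA_SZ_MAX, i.e. 240..256 byte requests take the
+ * software path.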
+ */ +#define SA_UNSAFE_DATA_SZ_MIN 240 +#define SA_UNSAFE_DATA_SZ_MAX 256 + +/** + * struct sa_crypto_data - Crypto driver instance data + * @base: Base address of the register space + * @pdev: Platform device pointer + * @sc_pool: security context pool + * @dev: Device pointer + * @scid_lock: secure context ID lock + * @sc_id_start: starting index for SC ID + * @sc_id_end: Ending index for SC ID + * @sc_id: Security Context ID + * @ctx_bm: Bitmap to keep track of Security context ID's + * @ctx: SA tfm context pointer + * @dma_rx1: Pointer to DMA rx channel for sizes < 256 Bytes + * @dma_rx2: Pointer to DMA rx channel for sizes > 256 Bytes + * @dma_tx: Pointer to DMA TX channel + */ +struct sa_crypto_data { + void __iomem *base; + struct platform_device *pdev; + struct dma_pool *sc_pool; + struct device *dev; + spinlock_t scid_lock; /* lock for SC-ID allocation */ + /* Security context data */ + u16 sc_id_start; + u16 sc_id_end; + u16 sc_id; + unsigned long ctx_bm[DIV_ROUND_UP(SA_MAX_NUM_CTX, + BITS_PER_LONG)]; + struct sa_tfm_ctx *ctx; + struct dma_chan *dma_rx1; + struct dma_chan *dma_rx2; + struct dma_chan *dma_tx; +}; + +/** + * struct sa_cmdl_param_info: Command label parameters info + * @index: Index of the parameter in the command label format + * @offset: the offset of the parameter + * @size: Size of the parameter + */ +struct sa_cmdl_param_info { + u16 index; + u16 offset; + u16 size; +}; + +/* Maximum length of Auxiliary data in 32bit words */ +#define SA_MAX_AUX_DATA_WORDS 8 + +/** + * struct sa_cmdl_upd_info: Command label updation info + * @flags: flags in command label + * @submode: Encryption submodes + * @enc_size: Size of first pass encryption size + * @enc_size2: Size of second pass encryption size + * @enc_offset: Encryption payload offset in the packet + * @enc_iv: Encryption initialization vector for pass2 + * @enc_iv2: Encryption initialization vector for pass2 + * @aad: Associated data + * @payload: Payload info + * @auth_size: Authentication size for pass 1 + * @auth_size2: Authentication size for pass 2 + * @auth_offset: Authentication payload offset + * @auth_iv: Authentication initialization vector + * @aux_key_info: Authentication aux key information + * @aux_key: Aux key for authentication + */ +struct sa_cmdl_upd_info { + u16 flags; + u16 submode; + struct sa_cmdl_param_info enc_size; + struct sa_cmdl_param_info enc_size2; + struct sa_cmdl_param_info enc_offset; + struct sa_cmdl_param_info enc_iv; + struct sa_cmdl_param_info enc_iv2; + struct sa_cmdl_param_info aad; + struct sa_cmdl_param_info payload; + struct sa_cmdl_param_info auth_size; + struct sa_cmdl_param_info auth_size2; + struct sa_cmdl_param_info auth_offset; + struct sa_cmdl_param_info auth_iv; + struct sa_cmdl_param_info aux_key_info; + u32 aux_key[SA_MAX_AUX_DATA_WORDS]; +}; + +/* + * Number of 32bit words appended after the command label + * in PSDATA to identify the crypto request context. 
+ * word-0: Request type
+ * word-1: pointer to request
+ */
+#define SA_PSDATA_CTX_WORDS 4
+
+/* Maximum size of Command label in 32-bit words */
+#define SA_MAX_CMDL_WORDS (SA_DMA_NUM_PS_WORDS - SA_PSDATA_CTX_WORDS)
+
+/**
+ * struct sa_ctx_info: SA context information
+ * @sc: Pointer to security context
+ * @sc_phys: Security context physical address that is passed on to SA2UL
+ * @sc_id: Security context ID
+ * @cmdl_size: Command label size
+ * @cmdl: Command label for a particular iteration
+ * @cmdl_upd_info: structure holding command label update info
+ * @epib: Extended protocol information block words
+ */
+struct sa_ctx_info {
+	u8 *sc;
+	dma_addr_t sc_phys;
+	u16 sc_id;
+	u16 cmdl_size;
+	u32 cmdl[SA_MAX_CMDL_WORDS];
+	struct sa_cmdl_upd_info cmdl_upd_info;
+	/* Store Auxiliary data such as K2/K3 subkeys in AES-XCBC */
+	u32 epib[SA_DMA_NUM_EPIB_WORDS];
+};
+
+/**
+ * struct sa_tfm_ctx: TFM context structure
+ * @dev_data: struct sa_crypto_data pointer
+ * @enc: struct sa_ctx_info for encryption
+ * @dec: struct sa_ctx_info for decryption
+ * @keylen: encryption/decryption key length
+ * @iv_idx: Initialization vector index
+ * @key: encryption key
+ * @fallback: SW fallback algorithm
+ */
+struct sa_tfm_ctx {
+	struct sa_crypto_data *dev_data;
+	struct sa_ctx_info enc;
+	struct sa_ctx_info dec;
+	int keylen;
+	int iv_idx;
+	u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+	/* for fallback */
+	union {
+		struct crypto_sync_skcipher *skcipher;
+	} fallback;
+};
+
+enum sa_submode {
+	SA_MODE_GEN = 0,
+	SA_MODE_CCM,
+	SA_MODE_GCM,
+	SA_MODE_GMAC
+};
+
+/* Encryption algorithms */
+enum sa_ealg_id {
+	SA_EALG_ID_NONE = 0,        /* No encryption */
+	SA_EALG_ID_NULL,            /* NULL encryption */
+	SA_EALG_ID_AES_CTR,         /* AES Counter mode */
+	SA_EALG_ID_AES_F8,          /* AES F8 mode */
+	SA_EALG_ID_AES_CBC,         /* AES CBC mode */
+	SA_EALG_ID_DES_CBC,         /* DES CBC mode */
+	SA_EALG_ID_3DES_CBC,        /* 3DES CBC mode */
+	SA_EALG_ID_CCM,             /* Counter with CBC-MAC mode */
+	SA_EALG_ID_GCM,             /* Galois Counter mode */
+	SA_EALG_ID_AES_ECB,
+	SA_EALG_ID_LAST
+};
+
+/* Authentication algorithms */
+enum sa_aalg_id {
+	SA_AALG_ID_NONE = 0,        /* No Authentication */
+	SA_AALG_ID_NULL = SA_EALG_ID_LAST, /* NULL Authentication */
+	SA_AALG_ID_MD5,             /* MD5 mode */
+	SA_AALG_ID_SHA1,            /* SHA1 mode */
+	SA_AALG_ID_SHA2_224,        /* 224-bit SHA2 mode */
+	SA_AALG_ID_SHA2_256,        /* 256-bit SHA2 mode */
+	SA_AALG_ID_SHA2_512,        /* 512-bit SHA2 mode */
+	SA_AALG_ID_HMAC_MD5,        /* HMAC with MD5 mode */
+	SA_AALG_ID_HMAC_SHA1,       /* HMAC with SHA1 mode */
+	SA_AALG_ID_HMAC_SHA2_224,   /* HMAC with 224-bit SHA2 mode */
+	SA_AALG_ID_HMAC_SHA2_256,   /* HMAC with 256-bit SHA2 mode */
+	SA_AALG_ID_GMAC,            /* Galois Message Auth. Code mode */
+	SA_AALG_ID_CMAC,            /* Cipher-based Message Auth. 
Code mode */ + SA_AALG_ID_CBC_MAC, /* Cipher Block Chaining */ + SA_AALG_ID_AES_XCBC /* AES Extended Cipher Block Chaining */ +}; + +/* + * Mode control engine algorithms used to index the + * mode control instruction tables + */ +enum sa_eng_algo_id { + SA_ENG_ALGO_ECB = 0, + SA_ENG_ALGO_CBC, + SA_ENG_ALGO_CFB, + SA_ENG_ALGO_OFB, + SA_ENG_ALGO_CTR, + SA_ENG_ALGO_F8, + SA_ENG_ALGO_F8F9, + SA_ENG_ALGO_GCM, + SA_ENG_ALGO_GMAC, + SA_ENG_ALGO_CCM, + SA_ENG_ALGO_CMAC, + SA_ENG_ALGO_CBCMAC, + SA_NUM_ENG_ALGOS +}; + +/** + * struct sa_eng_info: Security accelerator engine info + * @eng_id: Engine ID + * @sc_size: security context size + */ +struct sa_eng_info { + u8 eng_id; + u16 sc_size; +}; + +#endif /* _K3_SA2UL_ */ From 2dc53d0047458e28ed05b4548844ba78199857bf Mon Sep 17 00:00:00 2001 From: Keerthy Date: Mon, 13 Jul 2020 11:34:23 +0300 Subject: [PATCH 119/157] crypto: sa2ul - add sha1/sha256/sha512 support Add support for sha1/sha256/sha512 sa2ul based hardware authentication. With the hash update mechanism, we always use software fallback mechanism for now, as there is no way to fetch the partial hash state from the HW accelerator. HW accelerator is only used when digest is called for a data chunk of known size. Signed-off-by: Keerthy [t-kristo@ti.com: various bug fixes, major cleanups and refactoring of code] Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/sa2ul.c | 549 ++++++++++++++++++++++++++++++++++++++++- drivers/crypto/sa2ul.h | 24 +- 2 files changed, 560 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index 860c7435fefa..761754dd2b88 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -18,8 +18,10 @@ #include #include +#include #include #include +#include #include "sa2ul.h" @@ -69,20 +71,32 @@ static struct device *sa_k3_dev; /** * struct sa_cmdl_cfg - Command label configuration descriptor + * @aalg: authentication algorithm ID * @enc_eng_id: Encryption Engine ID supported by the SA hardware + * @auth_eng_id: Authentication Engine ID * @iv_size: Initialization Vector size + * @akey: Authentication key + * @akey_len: Authentication key length */ struct sa_cmdl_cfg { + int aalg; u8 enc_eng_id; + u8 auth_eng_id; u8 iv_size; + const u8 *akey; + u16 akey_len; }; /** * struct algo_data - Crypto algorithm specific data * @enc_eng: Encryption engine info structure + * @auth_eng: Authentication engine info structure + * @auth_ctrl: Authentication control word + * @hash_size: Size of digest * @iv_idx: iv index in psdata * @iv_out_size: iv out size * @ealg_id: Encryption Algorithm ID + * @aalg_id: Authentication algorithm ID * @mci_enc: Mode Control Instruction for Encryption algorithm * @mci_dec: Mode Control Instruction for Decryption * @inv_key: Whether the encryption algorithm demands key inversion @@ -90,9 +104,13 @@ struct sa_cmdl_cfg { */ struct algo_data { struct sa_eng_info enc_eng; + struct sa_eng_info auth_eng; + u8 auth_ctrl; + u8 hash_size; u8 iv_idx; u8 iv_out_size; u8 ealg_id; + u8 aalg_id; u8 *mci_enc; u8 *mci_dec; bool inv_key; @@ -109,6 +127,7 @@ struct sa_alg_tmpl { u32 type; /* CRYPTO_ALG_TYPE from */ union { struct skcipher_alg skcipher; + struct ahash_alg ahash; } alg; bool registered; }; @@ -150,6 +169,9 @@ struct sa_rx_data { * @enc_offset: offset of cipher data * @enc_size: data to be passed to cipher engine * @enc_iv: cipher IV + * @auth_offset: offset of the authentication data + * @auth_size: size of the authentication data + * @auth_iv: authentication IV * @type: algorithm type for 
the request * @cmdl: command label pointer * @base: pointer to the base request @@ -166,6 +188,9 @@ struct sa_req { u8 enc_offset; u16 enc_size; u8 *enc_iv; + u8 auth_offset; + u16 auth_size; + u8 *auth_iv; u32 type; u32 *cmdl; struct crypto_async_request *base; @@ -354,6 +379,20 @@ static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz, return 0; } +/* Set Security context for the authentication engine */ +static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz, + u8 *sc_buf) +{ + /* Set Authentication mode selector to hash processing */ + sc_buf[0] = SA_HASH_PROCESSING; + /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */ + sc_buf[1] = SA_UPLOAD_HASH_TO_TLR; + sc_buf[1] |= ad->auth_ctrl; + + /* basic hash */ + sc_buf[1] |= SA_BASIC_HASH; +} + static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16) { int j; @@ -369,8 +408,9 @@ static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16) static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl, struct sa_cmdl_upd_info *upd_info) { - u8 enc_offset = 0, total = 0; + u8 enc_offset = 0, auth_offset = 0, total = 0; u8 enc_next_eng = SA_ENG_ID_OUTPORT2; + u8 auth_next_eng = SA_ENG_ID_OUTPORT2; u32 *word_ptr = (u32 *)cmdl; int i; @@ -380,7 +420,10 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl, /* Iniialize the command update structure */ memzero_explicit(upd_info, sizeof(*upd_info)); - if (cfg->enc_eng_id != SA_ENG_ID_NONE) + if (cfg->enc_eng_id) + total = SA_CMDL_HEADER_SIZE_BYTES; + + if (cfg->auth_eng_id) total = SA_CMDL_HEADER_SIZE_BYTES; if (cfg->iv_size) @@ -388,7 +431,7 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl, enc_next_eng = SA_ENG_ID_OUTPORT2; - if (cfg->enc_eng_id != SA_ENG_ID_NONE) { + if (cfg->enc_eng_id) { upd_info->flags |= SA_CMDL_UPD_ENC; upd_info->enc_size.index = enc_offset >> 2; upd_info->enc_offset.index = upd_info->enc_size.index + 1; @@ -415,6 +458,16 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl, } } + if (cfg->auth_eng_id) { + upd_info->flags |= SA_CMDL_UPD_AUTH; + upd_info->auth_size.index = auth_offset >> 2; + upd_info->auth_offset.index = upd_info->auth_size.index + 1; + cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng; + cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] = + SA_CMDL_HEADER_SIZE_BYTES; + total += SA_CMDL_HEADER_SIZE_BYTES; + } + total = roundup(total, 8); for (i = 0; i < total / 4; i++) @@ -448,6 +501,27 @@ static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl, } } } + + if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) { + cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK; + cmdl[upd_info->auth_size.index] |= req->auth_size; + cmdl[upd_info->auth_offset.index] &= + ~SA_CMDL_SOP_BYPASS_LEN_MASK; + cmdl[upd_info->auth_offset.index] |= + ((u32)req->auth_offset << + __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK)); + if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) { + sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index], + req->auth_iv, + (upd_info->auth_iv.size > 8)); + } + if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) { + int offset = (req->auth_size & 0xF) ? 
4 : 0; + + memcpy(&cmdl[upd_info->aux_key_info.index], + &upd_info->aux_key[offset], 16); + } + } } /* Format SWINFO words to be sent to SA */ @@ -481,21 +555,34 @@ static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr) static int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key, - u16 enc_key_sz, struct algo_data *ad, u8 enc, u32 *swinfo) + u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz, + struct algo_data *ad, u8 enc, u32 *swinfo) { int enc_sc_offset = 0; + int auth_sc_offset = 0; u8 *sc_buf = ctx->sc; u16 sc_id = ctx->sc_id; u8 first_engine; memzero_explicit(sc_buf, SA_CTX_MAX_SZ); - enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; + if (ad->enc_eng.eng_id) { + enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; + first_engine = ad->enc_eng.eng_id; + sc_buf[1] = SA_SCCTL_FE_ENC; + ad->hash_size = ad->iv_out_size; + } else { + enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; + auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size; + first_engine = ad->auth_eng.eng_id; + sc_buf[1] = SA_SCCTL_FE_AUTH_ENC; + if (!ad->hash_size) + return -EINVAL; + ad->hash_size = roundup(ad->hash_size, 8); + } /* SCCTL Owner info: 0=host, 1=CP_ACE */ sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0; - /* SCCTL F/E control */ - sc_buf[1] = SA_SCCTL_FE_ENC; memcpy(&sc_buf[2], &sc_id, 2); sc_buf[4] = 0x0; sc_buf[5] = PRIV_ID; @@ -509,16 +596,19 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key, return -EINVAL; } + /* Prepare context for authentication engine */ + if (ad->auth_eng.sc_size) + sa_set_sc_auth(ad, auth_key, auth_key_sz, + &sc_buf[auth_sc_offset]); + /* Set the ownership of context to CP_ACE */ sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80; /* swizzle the security context */ sa_swiz_128(sc_buf, SA_CTX_MAX_SZ); - /* Setup SWINFO */ - first_engine = ad->enc_eng.eng_id; sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0, - SA_SW_INFO_FLAG_EVICT, ad->iv_out_size, swinfo); + SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo); sa_dump_sc(sc_buf, ctx->sc_phys); @@ -652,7 +742,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, return ret; /* Setup Encryption Security Context & Command label template */ - if (sa_init_sc(&ctx->enc, key, keylen, ad, 1, &ctx->enc.epib[1])) + if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1, + &ctx->enc.epib[1])) goto badkey; cmdl_len = sa_format_cmdl_gen(&cfg, @@ -664,7 +755,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, ctx->enc.cmdl_size = cmdl_len; /* Setup Decryption Security Context & Command label template */ - if (sa_init_sc(&ctx->dec, key, keylen, ad, 0, &ctx->dec.epib[1])) + if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0, + &ctx->dec.epib[1])) goto badkey; cfg.enc_eng_id = ad->enc_eng.eng_id; @@ -1058,6 +1150,347 @@ static int sa_decrypt(struct skcipher_request *req) return sa_cipher_run(req, req->iv, 0); } +static void sa_sha_dma_in_callback(void *data) +{ + struct sa_rx_data *rxd = (struct sa_rx_data *)data; + struct ahash_request *req; + struct crypto_ahash *tfm; + unsigned int authsize; + int i, sg_nents; + size_t ml, pl; + u32 *result; + __be32 *mdptr; + + req = container_of(rxd->req, struct ahash_request, base); + tfm = crypto_ahash_reqtfm(req); + authsize = crypto_ahash_digestsize(tfm); + + mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml); + result = (u32 *)req->result; + + for (i = 0; i < (authsize / 4); i++) + result[i] = be32_to_cpu(mdptr[i + 4]); + + sg_nents = sg_nents_for_len(req->src, req->nbytes); + dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE); + + kfree(rxd->split_src_sg); + + 
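+	/*
+	 * The digest was pulled out of the RX metadata above: the hash
+	 * words start at mdptr[4], immediately after the four EPIB words,
+	 * and arrive big-endian from the engine, hence the be32_to_cpu()
+	 * per 32-bit word.
+	 */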
kfree(rxd); + + ahash_request_complete(req, 0); +} + +static int zero_message_process(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + int sa_digest_size = crypto_ahash_digestsize(tfm); + + switch (sa_digest_size) { + case SHA1_DIGEST_SIZE: + memcpy(req->result, sha1_zero_message_hash, sa_digest_size); + break; + case SHA256_DIGEST_SIZE: + memcpy(req->result, sha256_zero_message_hash, sa_digest_size); + break; + case SHA512_DIGEST_SIZE: + memcpy(req->result, sha512_zero_message_hash, sa_digest_size); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sa_sha_run(struct ahash_request *req) +{ + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_req sa_req = { 0 }; + size_t auth_len; + + auth_len = req->nbytes; + + if (!auth_len) + return zero_message_process(req); + + if (auth_len > SA_MAX_DATA_SZ || + (auth_len >= SA_UNSAFE_DATA_SZ_MIN && + auth_len <= SA_UNSAFE_DATA_SZ_MAX)) { + struct ahash_request *subreq = &rctx->fallback_req; + int ret = 0; + + ahash_request_set_tfm(subreq, ctx->fallback.ahash); + subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + crypto_ahash_init(subreq); + + subreq->nbytes = auth_len; + subreq->src = req->src; + subreq->result = req->result; + + ret |= crypto_ahash_update(subreq); + + subreq->nbytes = 0; + + ret |= crypto_ahash_final(subreq); + + return ret; + } + + sa_req.size = auth_len; + sa_req.auth_size = auth_len; + sa_req.src = req->src; + sa_req.dst = req->src; + sa_req.enc = true; + sa_req.type = CRYPTO_ALG_TYPE_AHASH; + sa_req.callback = sa_sha_dma_in_callback; + sa_req.mdata_size = 28; + sa_req.ctx = ctx; + sa_req.base = &req->base; + + return sa_run(&sa_req); +} + +static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad) +{ + int bs = crypto_shash_blocksize(ctx->shash); + int cmdl_len; + struct sa_cmdl_cfg cfg; + + ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ; + ad->auth_eng.eng_id = SA_ENG_ID_AM1; + ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ; + + memset(ctx->authkey, 0, bs); + memset(&cfg, 0, sizeof(cfg)); + cfg.aalg = ad->aalg_id; + cfg.enc_eng_id = ad->enc_eng.eng_id; + cfg.auth_eng_id = ad->auth_eng.eng_id; + cfg.iv_size = 0; + cfg.akey = NULL; + cfg.akey_len = 0; + + /* Setup Encryption Security Context & Command label template */ + if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0, + &ctx->enc.epib[1])) + goto badkey; + + cmdl_len = sa_format_cmdl_gen(&cfg, + (u8 *)ctx->enc.cmdl, + &ctx->enc.cmdl_upd_info); + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + goto badkey; + + ctx->enc.cmdl_size = cmdl_len; + + return 0; + +badkey: + dev_err(sa_k3_dev, "%s: badkey\n", __func__); + return -EINVAL; +} + +static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) +{ + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + int ret; + + memset(ctx, 0, sizeof(*ctx)); + ctx->dev_data = data; + ret = sa_init_ctx_info(&ctx->enc, data); + if (ret) + return ret; + + if (alg_base) { + ctx->shash = crypto_alloc_shash(alg_base, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->shash)) { + dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", + alg_base); + return PTR_ERR(ctx->shash); + } + /* for fallback */ + ctx->fallback.ahash = + crypto_alloc_ahash(alg_base, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback.ahash)) { + dev_err(ctx->dev_data->dev, + "Could not load fallback driver\n"); + 
return PTR_ERR(ctx->fallback.ahash); + } + } + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct sa_sha_req_ctx) + + crypto_ahash_reqsize(ctx->fallback.ahash)); + + return 0; +} + +static int sa_sha_digest(struct ahash_request *req) +{ + return sa_sha_run(req); +} + +static int sa_sha_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + dev_dbg(sa_k3_dev, "init: digest size: %d, rctx=%llx\n", + crypto_ahash_digestsize(tfm), (u64)rctx); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_init(&rctx->fallback_req); +} + +static int sa_sha_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + + return crypto_ahash_update(&rctx->fallback_req); +} + +static int sa_sha_final(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.result = req->result; + + return crypto_ahash_final(&rctx->fallback_req); +} + +static int sa_sha_finup(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + rctx->fallback_req.result = req->result; + + return crypto_ahash_finup(&rctx->fallback_req); +} + +static int sa_sha_import(struct ahash_request *req, const void *in) +{ + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_import(&rctx->fallback_req, in); +} + +static int sa_sha_export(struct ahash_request *req, void *out) +{ + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + struct ahash_request *subreq = &rctx->fallback_req; + + ahash_request_set_tfm(subreq, ctx->fallback.ahash); + subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_export(subreq, out); +} + +static int sa_sha1_cra_init(struct crypto_tfm *tfm) +{ + struct algo_data ad = { 0 }; + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + sa_sha_cra_init_alg(tfm, 
"sha1"); + + ad.aalg_id = SA_AALG_ID_SHA1; + ad.hash_size = SHA1_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1; + + sa_sha_setup(ctx, &ad); + + return 0; +} + +static int sa_sha256_cra_init(struct crypto_tfm *tfm) +{ + struct algo_data ad = { 0 }; + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + sa_sha_cra_init_alg(tfm, "sha256"); + + ad.aalg_id = SA_AALG_ID_SHA2_256; + ad.hash_size = SHA256_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256; + + sa_sha_setup(ctx, &ad); + + return 0; +} + +static int sa_sha512_cra_init(struct crypto_tfm *tfm) +{ + struct algo_data ad = { 0 }; + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + sa_sha_cra_init_alg(tfm, "sha512"); + + ad.aalg_id = SA_AALG_ID_SHA2_512; + ad.hash_size = SHA512_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512; + + sa_sha_setup(ctx, &ad); + + return 0; +} + +static void sa_sha_cra_exit(struct crypto_tfm *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH) + sa_free_ctx_info(&ctx->enc, data); + + crypto_free_shash(ctx->shash); + crypto_free_ahash(ctx->fallback.ahash); +} + static struct sa_alg_tmpl sa_algs[] = { { .type = CRYPTO_ALG_TYPE_SKCIPHER, @@ -1149,6 +1582,93 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_decrypt, } }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .alg.ahash = { + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-sa2ul", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sa_sha1_cra_init, + .cra_exit = sa_sha_cra_exit, + }, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct sa_sha_req_ctx) + + sizeof(struct sha1_state), + .init = sa_sha_init, + .update = sa_sha_update, + .final = sa_sha_final, + .finup = sa_sha_finup, + .digest = sa_sha_digest, + .export = sa_sha_export, + .import = sa_sha_import, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .alg.ahash = { + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-sa2ul", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sa_sha256_cra_init, + .cra_exit = sa_sha_cra_exit, + }, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct sa_sha_req_ctx) + + sizeof(struct sha256_state), + .init = sa_sha_init, + .update = sa_sha_update, + .final = sa_sha_final, + .finup = sa_sha_finup, + .digest = sa_sha_digest, + .export = sa_sha_export, + .import = sa_sha_import, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .alg.ahash = { + .halg.base = { + .cra_name = "sha512", + .cra_driver_name = "sha512-sa2ul", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sa_sha512_cra_init, + .cra_exit = sa_sha_cra_exit, + }, + .halg.digestsize = SHA512_DIGEST_SIZE, + 
.halg.statesize = sizeof(struct sa_sha_req_ctx) + + sizeof(struct sha512_state), + .init = sa_sha_init, + .update = sa_sha_update, + .final = sa_sha_final, + .finup = sa_sha_finup, + .digest = sa_sha_digest, + .export = sa_sha_export, + .import = sa_sha_import, + }, + }, }; /* Register the algorithms in crypto framework */ @@ -1163,6 +1683,9 @@ static void sa_register_algos(const struct device *dev) if (type == CRYPTO_ALG_TYPE_SKCIPHER) { alg_name = sa_algs[i].alg.skcipher.base.cra_name; err = crypto_register_skcipher(&sa_algs[i].alg.skcipher); + } else if (type == CRYPTO_ALG_TYPE_AHASH) { + alg_name = sa_algs[i].alg.ahash.halg.base.cra_name; + err = crypto_register_ahash(&sa_algs[i].alg.ahash); } else { dev_err(dev, "un-supported crypto algorithm (%d)", @@ -1189,6 +1712,8 @@ static void sa_unregister_algos(const struct device *dev) continue; if (type == CRYPTO_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&sa_algs[i].alg.skcipher); + else if (type == CRYPTO_ALG_TYPE_AHASH) + crypto_unregister_ahash(&sa_algs[i].alg.ahash); sa_algs[i].registered = false; } diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h index 45ba86cb5d11..dc5e3470c3a0 100644 --- a/drivers/crypto/sa2ul.h +++ b/drivers/crypto/sa2ul.h @@ -73,7 +73,6 @@ struct sa_tfm_ctx; #define SA_ENG_ID_AM1 4 /* Auth. engine with SHA1/MD5/SHA2 core */ #define SA_ENG_ID_AM2 5 /* Authentication engine for pass 2 */ #define SA_ENG_ID_OUTPORT2 20 /* Egress module 2 */ -#define SA_ENG_ID_NONE 0xff /* * Command Label Definitions @@ -156,6 +155,13 @@ struct sa_tfm_ctx; #define SA_ALIGN_MASK (sizeof(u32) - 1) #define SA_ALIGNED __aligned(32) +#define SA_AUTH_SW_CTRL_MD5 1 +#define SA_AUTH_SW_CTRL_SHA1 2 +#define SA_AUTH_SW_CTRL_SHA224 3 +#define SA_AUTH_SW_CTRL_SHA256 4 +#define SA_AUTH_SW_CTRL_SHA384 5 +#define SA_AUTH_SW_CTRL_SHA512 6 + /* SA2UL can only handle maximum data size of 64KB */ #define SA_MAX_DATA_SZ U16_MAX @@ -297,15 +303,31 @@ struct sa_tfm_ctx { struct sa_crypto_data *dev_data; struct sa_ctx_info enc; struct sa_ctx_info dec; + struct sa_ctx_info auth; int keylen; int iv_idx; u32 key[AES_KEYSIZE_256 / sizeof(u32)]; + u8 authkey[SHA512_BLOCK_SIZE]; + struct crypto_shash *shash; /* for fallback */ union { struct crypto_sync_skcipher *skcipher; + struct crypto_ahash *ahash; } fallback; }; +/** + * struct sa_sha_req_ctx: Structure used for sha request + * @dev_data: struct sa_crypto_data pointer + * @cmdl: Complete command label with psdata and epib included + * @fallback_req: SW fallback request container + */ +struct sa_sha_req_ctx { + struct sa_crypto_data *dev_data; + u32 cmdl[SA_MAX_CMDL_WORDS + SA_PSDATA_CTX_WORDS]; + struct ahash_request fallback_req; +}; + enum sa_submode { SA_MODE_GEN = 0, SA_MODE_CCM, From d2c8ac187fc922e73930a1b2f6a211e27f595d01 Mon Sep 17 00:00:00 2001 From: Keerthy Date: Mon, 13 Jul 2020 11:34:24 +0300 Subject: [PATCH 120/157] crypto: sa2ul - Add AEAD algorithm support Add support for sa2ul hardware AEAD for hmac(sha256),cbc(aes) and hmac(sha1),cbc(aes) algorithms. 
Signed-off-by: Keerthy [t-kristo@ti.com: number of bug fixes, major refactoring and cleanup of code] Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/sa2ul.c | 538 +++++++++++++++++++++++++++++++++++++++-- drivers/crypto/sa2ul.h | 1 + 2 files changed, 518 insertions(+), 21 deletions(-) diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index 761754dd2b88..fb4c0aba9048 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -17,7 +17,9 @@ #include #include +#include #include +#include #include #include #include @@ -77,6 +79,7 @@ static struct device *sa_k3_dev; * @iv_size: Initialization Vector size * @akey: Authentication key * @akey_len: Authentication key length + * @enc: True, if this is an encode request */ struct sa_cmdl_cfg { int aalg; @@ -85,6 +88,7 @@ struct sa_cmdl_cfg { u8 iv_size; const u8 *akey; u16 akey_len; + bool enc; }; /** @@ -101,6 +105,8 @@ struct sa_cmdl_cfg { * @mci_dec: Mode Control Instruction for Decryption * @inv_key: Whether the encryption algorithm demands key inversion * @ctx: Pointer to the algorithm context + * @keyed_mac: Whether the authentication algorithm has key + * @prep_iopad: Function pointer to generate intermediate ipad/opad */ struct algo_data { struct sa_eng_info enc_eng; @@ -115,6 +121,9 @@ struct algo_data { u8 *mci_dec; bool inv_key; struct sa_tfm_ctx *ctx; + bool keyed_mac; + void (*prep_iopad)(struct algo_data *algo, const u8 *key, + u16 key_sz, __be32 *ipad, __be32 *opad); }; /** @@ -128,6 +137,7 @@ struct sa_alg_tmpl { union { struct skcipher_alg skcipher; struct ahash_alg ahash; + struct aead_alg aead; } alg; bool registered; }; @@ -234,6 +244,38 @@ static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, }; +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for encryption + */ +static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = { + { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for decryption + */ +static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = { + { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + /* * Mode Control Instructions for various Key lengths 128, 192, 256 * For ECB (Electronic Code Book) mode for encryption @@ -313,6 +355,82 @@ static void sa_swiz_128(u8 *in, u16 len) } } +/* Prepare the ipad and opad from key as per SHA algorithm step 1*/ +static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz) +{ + int i; 
+ + for (i = 0; i < key_sz; i++) { + k_ipad[i] = key[i] ^ 0x36; + k_opad[i] = key[i] ^ 0x5c; + } + + /* Instead of XOR with 0 */ + for (; i < SHA1_BLOCK_SIZE; i++) { + k_ipad[i] = 0x36; + k_opad[i] = 0x5c; + } +} + +static void sa_export_shash(struct shash_desc *hash, int block_size, + int digest_size, __be32 *out) +{ + union { + struct sha1_state sha1; + struct sha256_state sha256; + struct sha512_state sha512; + } sha; + void *state; + u32 *result; + int i; + + switch (digest_size) { + case SHA1_DIGEST_SIZE: + state = &sha.sha1; + result = sha.sha1.state; + break; + case SHA256_DIGEST_SIZE: + state = &sha.sha256; + result = sha.sha256.state; + break; + default: + dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__, + digest_size); + return; + } + + crypto_shash_export(hash, state); + + for (i = 0; i < digest_size >> 2; i++) + out[i] = cpu_to_be32(result[i]); +} + +static void sa_prepare_iopads(struct algo_data *data, const u8 *key, + u16 key_sz, __be32 *ipad, __be32 *opad) +{ + SHASH_DESC_ON_STACK(shash, data->ctx->shash); + int block_size = crypto_shash_blocksize(data->ctx->shash); + int digest_size = crypto_shash_digestsize(data->ctx->shash); + u8 k_ipad[SHA1_BLOCK_SIZE]; + u8 k_opad[SHA1_BLOCK_SIZE]; + + shash->tfm = data->ctx->shash; + + prepare_kiopad(k_ipad, k_opad, key, key_sz); + + memzero_explicit(ipad, block_size); + memzero_explicit(opad, block_size); + + crypto_shash_init(shash); + crypto_shash_update(shash, k_ipad, block_size); + sa_export_shash(shash, block_size, digest_size, ipad); + + crypto_shash_init(shash); + crypto_shash_update(shash, k_opad, block_size); + + sa_export_shash(shash, block_size, digest_size, opad); +} + /* Derive the inverse key used in AES-CBC decryption operation */ static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz) { @@ -383,14 +501,26 @@ static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz, static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz, u8 *sc_buf) { + __be32 ipad[64], opad[64]; + /* Set Authentication mode selector to hash processing */ sc_buf[0] = SA_HASH_PROCESSING; /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */ sc_buf[1] = SA_UPLOAD_HASH_TO_TLR; sc_buf[1] |= ad->auth_ctrl; - /* basic hash */ - sc_buf[1] |= SA_BASIC_HASH; + /* Copy the keys or ipad/opad */ + if (ad->keyed_mac) { + ad->prep_iopad(ad, key, key_sz, ipad, opad); + + /* Copy ipad to AuthKey */ + memcpy(&sc_buf[32], ipad, ad->hash_size); + /* Copy opad to Aux-1 */ + memcpy(&sc_buf[64], opad, ad->hash_size); + } else { + /* basic hash */ + sc_buf[1] |= SA_BASIC_HASH; + } } static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16) @@ -420,16 +550,18 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl, /* Iniialize the command update structure */ memzero_explicit(upd_info, sizeof(*upd_info)); - if (cfg->enc_eng_id) - total = SA_CMDL_HEADER_SIZE_BYTES; + if (cfg->enc_eng_id && cfg->auth_eng_id) { + if (cfg->enc) { + auth_offset = SA_CMDL_HEADER_SIZE_BYTES; + enc_next_eng = cfg->auth_eng_id; - if (cfg->auth_eng_id) - total = SA_CMDL_HEADER_SIZE_BYTES; - - if (cfg->iv_size) - total += cfg->iv_size; - - enc_next_eng = SA_ENG_ID_OUTPORT2; + if (cfg->iv_size) + auth_offset += cfg->iv_size; + } else { + enc_offset = SA_CMDL_HEADER_SIZE_BYTES; + auth_next_eng = cfg->enc_eng_id; + } + } if (cfg->enc_eng_id) { upd_info->flags |= SA_CMDL_UPD_ENC; @@ -450,11 +582,11 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl, cmdl[enc_offset + 
SA_CMDL_OFFSET_OPTION_CTRL1] = (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3)); - enc_offset += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size; + total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size; } else { cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] = SA_CMDL_HEADER_SIZE_BYTES; - enc_offset += SA_CMDL_HEADER_SIZE_BYTES; + total += SA_CMDL_HEADER_SIZE_BYTES; } } @@ -562,23 +694,28 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key, int auth_sc_offset = 0; u8 *sc_buf = ctx->sc; u16 sc_id = ctx->sc_id; - u8 first_engine; + u8 first_engine = 0; memzero_explicit(sc_buf, SA_CTX_MAX_SZ); - if (ad->enc_eng.eng_id) { - enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; - first_engine = ad->enc_eng.eng_id; - sc_buf[1] = SA_SCCTL_FE_ENC; - ad->hash_size = ad->iv_out_size; - } else { + if (ad->auth_eng.eng_id) { + if (enc) + first_engine = ad->enc_eng.eng_id; + else + first_engine = ad->auth_eng.eng_id; + enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size; - first_engine = ad->auth_eng.eng_id; sc_buf[1] = SA_SCCTL_FE_AUTH_ENC; if (!ad->hash_size) return -EINVAL; ad->hash_size = roundup(ad->hash_size, 8); + + } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) { + enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; + first_engine = ad->enc_eng.eng_id; + sc_buf[1] = SA_SCCTL_FE_ENC; + ad->hash_size = ad->iv_out_size; } /* SCCTL Owner info: 0=host, 1=CP_ACE */ @@ -1491,6 +1628,305 @@ static void sa_sha_cra_exit(struct crypto_tfm *tfm) crypto_free_ahash(ctx->fallback.ahash); } +static void sa_aead_dma_in_callback(void *data) +{ + struct sa_rx_data *rxd = (struct sa_rx_data *)data; + struct aead_request *req; + struct crypto_aead *tfm; + unsigned int start; + unsigned int authsize; + u8 auth_tag[SA_MAX_AUTH_TAG_SZ]; + size_t pl, ml; + int i, sglen; + int err = 0; + u16 auth_len; + u32 *mdptr; + bool diff_dst; + enum dma_data_direction dir_src; + + req = container_of(rxd->req, struct aead_request, base); + tfm = crypto_aead_reqtfm(req); + start = req->assoclen + req->cryptlen; + authsize = crypto_aead_authsize(tfm); + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + + mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml); + for (i = 0; i < (authsize / 4); i++) + mdptr[i + 4] = swab32(mdptr[i + 4]); + + auth_len = req->assoclen + req->cryptlen; + if (!rxd->enc) + auth_len -= authsize; + + sglen = sg_nents_for_len(rxd->src, auth_len); + dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src); + kfree(rxd->split_src_sg); + + if (diff_dst) { + sglen = sg_nents_for_len(rxd->dst, auth_len); + dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE); + kfree(rxd->split_dst_sg); + } + + if (rxd->enc) { + scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize, + 1); + } else { + start -= authsize; + scatterwalk_map_and_copy(auth_tag, req->src, start, authsize, + 0); + + err = memcmp(&mdptr[4], auth_tag, authsize) ? 
-EBADMSG : 0; + } + + kfree(rxd); + + aead_request_complete(req, err); +} + +static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash, + const char *fallback) +{ + struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + int ret; + + memzero_explicit(ctx, sizeof(*ctx)); + + ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->shash)) { + dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash); + return PTR_ERR(ctx->shash); + } + + ctx->fallback.aead = crypto_alloc_aead(fallback, 0, + CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(ctx->fallback.aead)) { + dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n", + fallback); + return PTR_ERR(ctx->fallback.aead); + } + + crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) + + crypto_aead_reqsize(ctx->fallback.aead)); + + ret = sa_init_ctx_info(&ctx->enc, data); + if (ret) + return ret; + + ret = sa_init_ctx_info(&ctx->dec, data); + if (ret) { + sa_free_ctx_info(&ctx->enc, data); + return ret; + } + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + return ret; +} + +static int sa_cra_init_aead_sha1(struct crypto_aead *tfm) +{ + return sa_cra_init_aead(tfm, "sha1", + "authenc(hmac(sha1-ce),cbc(aes-ce))"); +} + +static int sa_cra_init_aead_sha256(struct crypto_aead *tfm) +{ + return sa_cra_init_aead(tfm, "sha256", + "authenc(hmac(sha256-ce),cbc(aes-ce))"); +} + +static void sa_exit_tfm_aead(struct crypto_aead *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + + crypto_free_shash(ctx->shash); + crypto_free_aead(ctx->fallback.aead); + + sa_free_ctx_info(&ctx->enc, data); + sa_free_ctx_info(&ctx->dec, data); +} + +/* AEAD algorithm configuration interface function */ +static int sa_aead_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen, + struct algo_data *ad) +{ + struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_authenc_keys keys; + int cmdl_len; + struct sa_cmdl_cfg cfg; + int key_idx; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + return -EINVAL; + + /* Convert the key size (16/24/32) to the key size index (0/1/2) */ + key_idx = (keys.enckeylen >> 3) - 2; + if (key_idx >= 3) + return -EINVAL; + + ad->ctx = ctx; + ad->enc_eng.eng_id = SA_ENG_ID_EM1; + ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ; + ad->auth_eng.eng_id = SA_ENG_ID_AM1; + ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ; + ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx]; + ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx]; + ad->inv_key = true; + ad->keyed_mac = true; + ad->ealg_id = SA_EALG_ID_AES_CBC; + ad->prep_iopad = sa_prepare_iopads; + + memset(&cfg, 0, sizeof(cfg)); + cfg.enc = true; + cfg.aalg = ad->aalg_id; + cfg.enc_eng_id = ad->enc_eng.eng_id; + cfg.auth_eng_id = ad->auth_eng.eng_id; + cfg.iv_size = crypto_aead_ivsize(authenc); + cfg.akey = keys.authkey; + cfg.akey_len = keys.authkeylen; + + /* Setup Encryption Security Context & Command label template */ + if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen, + keys.authkey, keys.authkeylen, + ad, 1, &ctx->enc.epib[1])) + return -EINVAL; + + cmdl_len = sa_format_cmdl_gen(&cfg, + (u8 *)ctx->enc.cmdl, + &ctx->enc.cmdl_upd_info); + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + return -EINVAL; + + ctx->enc.cmdl_size = cmdl_len; + + /* Setup Decryption Security 
Context & Command label template */ + if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen, + keys.authkey, keys.authkeylen, + ad, 0, &ctx->dec.epib[1])) + return -EINVAL; + + cfg.enc = false; + cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl, + &ctx->dec.cmdl_upd_info); + + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + return -EINVAL; + + ctx->dec.cmdl_size = cmdl_len; + + crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(ctx->fallback.aead, + crypto_aead_get_flags(authenc) & + CRYPTO_TFM_REQ_MASK); + crypto_aead_setkey(ctx->fallback.aead, key, keylen); + + return 0; +} + +static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm)); + + return crypto_aead_setauthsize(ctx->fallback.aead, authsize); +} + +static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.ealg_id = SA_EALG_ID_AES_CBC; + ad.aalg_id = SA_AALG_ID_HMAC_SHA1; + ad.hash_size = SHA1_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1; + + return sa_aead_setkey(authenc, key, keylen, &ad); +} + +static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.ealg_id = SA_EALG_ID_AES_CBC; + ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256; + ad.hash_size = SHA256_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256; + + return sa_aead_setkey(authenc, key, keylen, &ad); +} + +static int sa_aead_run(struct aead_request *req, u8 *iv, int enc) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm); + struct sa_req sa_req = { 0 }; + size_t auth_size, enc_size; + + enc_size = req->cryptlen; + auth_size = req->assoclen + req->cryptlen; + + if (!enc) { + enc_size -= crypto_aead_authsize(tfm); + auth_size -= crypto_aead_authsize(tfm); + } + + if (auth_size > SA_MAX_DATA_SZ || + (auth_size >= SA_UNSAFE_DATA_SZ_MIN && + auth_size <= SA_UNSAFE_DATA_SZ_MAX)) { + struct aead_request *subreq = aead_request_ctx(req); + int ret; + + aead_request_set_tfm(subreq, ctx->fallback.aead); + aead_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + aead_request_set_ad(subreq, req->assoclen); + + ret = enc ? 
crypto_aead_encrypt(subreq) : + crypto_aead_decrypt(subreq); + return ret; + } + + sa_req.enc_offset = req->assoclen; + sa_req.enc_size = enc_size; + sa_req.auth_size = auth_size; + sa_req.size = auth_size; + sa_req.enc_iv = iv; + sa_req.type = CRYPTO_ALG_TYPE_AEAD; + sa_req.enc = enc; + sa_req.callback = sa_aead_dma_in_callback; + sa_req.mdata_size = 52; + sa_req.base = &req->base; + sa_req.ctx = ctx; + sa_req.src = req->src; + sa_req.dst = req->dst; + + return sa_run(&sa_req); +} + +/* AEAD algorithm encrypt interface function */ +static int sa_aead_encrypt(struct aead_request *req) +{ + return sa_aead_run(req, req->iv, 1); +} + +/* AEAD algorithm decrypt interface function */ +static int sa_aead_decrypt(struct aead_request *req) +{ + return sa_aead_run(req, req->iv, 0); +} + static struct sa_alg_tmpl sa_algs[] = { { .type = CRYPTO_ALG_TYPE_SKCIPHER, @@ -1669,6 +2105,61 @@ static struct sa_alg_tmpl sa_algs[] = { .import = sa_sha_import, }, }, + { + .type = CRYPTO_ALG_TYPE_AEAD, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha1),cbc(aes))-sa2ul", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_priority = 3000, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + + .init = sa_cra_init_aead_sha1, + .exit = sa_exit_tfm_aead, + .setkey = sa_aead_cbc_sha1_setkey, + .setauthsize = sa_aead_setauthsize, + .encrypt = sa_aead_encrypt, + .decrypt = sa_aead_decrypt, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AEAD, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha256),cbc(aes))-sa2ul", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0, + .cra_priority = 3000, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + + .init = sa_cra_init_aead_sha256, + .exit = sa_exit_tfm_aead, + .setkey = sa_aead_cbc_sha256_setkey, + .setauthsize = sa_aead_setauthsize, + .encrypt = sa_aead_encrypt, + .decrypt = sa_aead_decrypt, + }, + }, }; /* Register the algorithms in crypto framework */ @@ -1686,6 +2177,9 @@ static void sa_register_algos(const struct device *dev) } else if (type == CRYPTO_ALG_TYPE_AHASH) { alg_name = sa_algs[i].alg.ahash.halg.base.cra_name; err = crypto_register_ahash(&sa_algs[i].alg.ahash); + } else if (type == CRYPTO_ALG_TYPE_AEAD) { + alg_name = sa_algs[i].alg.aead.base.cra_name; + err = crypto_register_aead(&sa_algs[i].alg.aead); } else { dev_err(dev, "un-supported crypto algorithm (%d)", @@ -1714,6 +2208,8 @@ static void sa_unregister_algos(const struct device *dev) crypto_unregister_skcipher(&sa_algs[i].alg.skcipher); else if (type == CRYPTO_ALG_TYPE_AHASH) crypto_unregister_ahash(&sa_algs[i].alg.ahash); + else if (type == CRYPTO_ALG_TYPE_AEAD) + crypto_unregister_aead(&sa_algs[i].alg.aead); sa_algs[i].registered = false; } diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h index dc5e3470c3a0..7f7e3fe60d11 100644 --- a/drivers/crypto/sa2ul.h +++ b/drivers/crypto/sa2ul.h @@ -313,6 +313,7 @@ struct sa_tfm_ctx { union { struct crypto_sync_skcipher *skcipher; struct crypto_ahash *ahash; + struct crypto_aead *aead; } fallback; }; From 
fd92028e4c0cbbe498cc622369baff6b395e88e6 Mon Sep 17 00:00:00 2001
From: Tero Kristo
Date: Mon, 13 Jul 2020 11:34:25 +0300
Subject: [PATCH 121/157] crypto: sa2ul - add device links to child devices

The child devices of sa2ul (like the RNG) have a hard dependency on the
parent; they can't function without the parent enabled. Add a device link
for this purpose so that the dependencies are taken care of properly.

Signed-off-by: Tero Kristo
Signed-off-by: Herbert Xu
---
 drivers/crypto/sa2ul.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index fb4c0aba9048..ebcdffcdb686 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -2302,6 +2302,15 @@ err_dma_tx:
 	return ret;
 }

+static int sa_link_child(struct device *dev, void *data)
+{
+	struct device *parent = data;
+
+	device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
+
+	return 0;
+}
+
 static int sa_ul_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -2352,6 +2361,8 @@ static int sa_ul_probe(struct platform_device *pdev)
 	if (ret)
 		goto release_dma;

+	device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
+
 	return 0;

 release_dma:

From c06c76602e03bde24ee69a2022a829127e504202 Mon Sep 17 00:00:00 2001
From: Tom Rix
Date: Mon, 13 Jul 2020 07:06:34 -0700
Subject: [PATCH 122/157] crypto: qat - fix double free in
 qat_uclo_create_batch_init_list

clang static analysis flags this error:

  qat_uclo.c:297:3: warning: Attempt to free released memory [unix.Malloc]
          kfree(*init_tab_base);
          ^~~~~~~~~~~~~~~~~~~~~

When the input *init_tab_base is null, the function allocates memory for
the head of the list. When there is a problem allocating the other list
elements, the list is unwound and freed. Then a check is made whether the
list head was allocated, and it is freed as well. The variable 'tail_old'
keeps track of what may need to be freed. The unwinding/freeing block is

  while (tail_old) {
          mem_init = tail_old->next;
          kfree(tail_old);
          tail_old = mem_init;
  }

The problem is that the first element of tail_old is also what was
allocated for the list head:

  init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
  ...
          *init_tab_base = init_header;
          flag = 1;
  }
  tail_old = init_header;

So *init_tab_base/init_header are freed twice.

There is another problem. When the input *init_tab_base is non-null,
tail_old is calculated by traveling down the list to the last entry:

  tail_old = init_header;
  while (tail_old->next)
          tail_old = tail_old->next;

When the unwinding free happens, the last entry of the input list will be
freed. So the freeing needs a general change: free the first element of
tail_old only if it was locally allocated, otherwise skip it.

As a bit of cleanup, reset *init_tab_base if it came in as null.

Fixes: b4b7e67c917f ("crypto: qat - Intel(R) QAT ucode part of fw loader")
Cc:
Signed-off-by: Tom Rix
Signed-off-by: Herbert Xu
---
 drivers/crypto/qat/qat_common/qat_uclo.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 4cc1f436b075..bff759e2f811 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -288,13 +288,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
 	}
 	return 0;
 out_err:
+	/* Do not free the list head unless we allocated it.
*/ + tail_old = tail_old->next; + if (flag) { + kfree(*init_tab_base); + *init_tab_base = NULL; + } + while (tail_old) { mem_init = tail_old->next; kfree(tail_old); tail_old = mem_init; } - if (flag) - kfree(*init_tab_base); return -ENOMEM; } From bd25b4886ddcebec92591f298ce2ce345d7f2ea9 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:51 -0400 Subject: [PATCH 123/157] padata: remove start function padata_start() is only used right after pcrypt allocates an instance with all possible CPUs, when PADATA_INVALID can't happen, so there's no need for a separate "start" step. It can be done during allocation to save text, make using padata easier, and avoid unneeded calls in the future. Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 3 --- include/linux/padata.h | 1 - kernel/padata.c | 26 +------------------------- 3 files changed, 1 insertion(+), 29 deletions(-) diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index cbc383a1a3fe..fb9127ec5357 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -355,9 +355,6 @@ static int __init pcrypt_init(void) if (err) goto err_deinit_pencrypt; - padata_start(pencrypt); - padata_start(pdecrypt); - return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: diff --git a/include/linux/padata.h b/include/linux/padata.h index 7302efff5e65..20294cddc739 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -204,6 +204,5 @@ extern void padata_do_serial(struct padata_priv *padata); extern void __init padata_do_multithreaded(struct padata_mt_job *job); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); #endif diff --git a/kernel/padata.c b/kernel/padata.c index 4373f7adaa40..931762316612 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -789,30 +789,6 @@ out: } EXPORT_SYMBOL(padata_set_cpumask); -/** - * padata_start - start the parallel processing - * - * @pinst: padata instance to start - * - * Return: 0 on success or negative error code - */ -int padata_start(struct padata_instance *pinst) -{ - int err = 0; - - mutex_lock(&pinst->lock); - - if (pinst->flags & PADATA_INVALID) - err = -EINVAL; - - __padata_start(pinst); - - mutex_unlock(&pinst->lock); - - return err; -} -EXPORT_SYMBOL(padata_start); - /** * padata_stop - stop the parallel processing * @@ -1100,7 +1076,7 @@ static struct padata_instance *padata_alloc(const char *name, if (padata_setup_cpumasks(pinst)) goto err_free_rcpumask_cbcpu; - pinst->flags = 0; + __padata_start(pinst); kobject_init(&pinst->kobj, &padata_attr_type); mutex_init(&pinst->lock); From 350ef051d4edd884e8dea0be9f3685b4b32142fb Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:52 -0400 Subject: [PATCH 124/157] padata: remove stop function padata_stop() has two callers and is unnecessary in both cases. When pcrypt calls it before padata_free(), it's being unloaded so there are no outstanding padata jobs[0]. When __padata_free() calls it, it's either along the same path or else pcrypt initialization failed, which of course means there are also no outstanding jobs. Removing it simplifies padata and saves text. 
[0] https://lore.kernel.org/linux-crypto/20191119225017.mjrak2fwa5vccazl@gondor.apana.org.au/ Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- Documentation/core-api/padata.rst | 16 ++-------------- crypto/pcrypt.c | 12 +++--------- include/linux/padata.h | 1 - kernel/padata.c | 14 -------------- 4 files changed, 5 insertions(+), 38 deletions(-) diff --git a/Documentation/core-api/padata.rst b/Documentation/core-api/padata.rst index 0830e5b0e821..771d50330e5b 100644 --- a/Documentation/core-api/padata.rst +++ b/Documentation/core-api/padata.rst @@ -31,18 +31,7 @@ padata_instance structure for overall control of how jobs are to be run:: 'name' simply identifies the instance. -There are functions for enabling and disabling the instance:: - - int padata_start(struct padata_instance *pinst); - void padata_stop(struct padata_instance *pinst); - -These functions are setting or clearing the "PADATA_INIT" flag; if that flag is -not set, other functions will refuse to work. padata_start() returns zero on -success (flag set) or -EINVAL if the padata cpumask contains no active CPU -(flag not set). padata_stop() clears the flag and blocks until the padata -instance is unused. - -Finally, complete padata initialization by allocating a padata_shell:: +Then, complete padata initialization by allocating a padata_shell:: struct padata_shell *padata_alloc_shell(struct padata_instance *pinst); @@ -155,11 +144,10 @@ submitted. Destroying ---------- -Cleaning up a padata instance predictably involves calling the three free +Cleaning up a padata instance predictably involves calling the two free functions that correspond to the allocation in reverse:: void padata_free_shell(struct padata_shell *ps); - void padata_stop(struct padata_instance *pinst); void padata_free(struct padata_instance *pinst); It is the user's responsibility to ensure all outstanding jobs are complete diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index fb9127ec5357..2d4ac9d44902 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -327,12 +327,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) return ret; } -static void pcrypt_fini_padata(struct padata_instance *pinst) -{ - padata_stop(pinst); - padata_free(pinst); -} - static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, @@ -358,7 +352,7 @@ static int __init pcrypt_init(void) return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: - pcrypt_fini_padata(pencrypt); + padata_free(pencrypt); err_unreg_kset: kset_unregister(pcrypt_kset); err: @@ -369,8 +363,8 @@ static void __exit pcrypt_exit(void) { crypto_unregister_template(&pcrypt_tmpl); - pcrypt_fini_padata(pencrypt); - pcrypt_fini_padata(pdecrypt); + padata_free(pencrypt); + padata_free(pdecrypt); kset_unregister(pcrypt_kset); } diff --git a/include/linux/padata.h b/include/linux/padata.h index 20294cddc739..7d53208b43da 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -204,5 +204,4 @@ extern void padata_do_serial(struct padata_priv *padata); extern void __init padata_do_multithreaded(struct padata_mt_job *job); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern void padata_stop(struct padata_instance *pinst); #endif diff --git a/kernel/padata.c b/kernel/padata.c index 931762316612..8f55e717ba50 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -789,19 +789,6 @@ 
out: } EXPORT_SYMBOL(padata_set_cpumask); -/** - * padata_stop - stop the parallel processing - * - * @pinst: padata instance to stop - */ -void padata_stop(struct padata_instance *pinst) -{ - mutex_lock(&pinst->lock); - __padata_stop(pinst); - mutex_unlock(&pinst->lock); -} -EXPORT_SYMBOL(padata_stop); - #ifdef CONFIG_HOTPLUG_CPU static int __padata_add_cpu(struct padata_instance *pinst, int cpu) @@ -883,7 +870,6 @@ static void __padata_free(struct padata_instance *pinst) WARN_ON(!list_empty(&pinst->pslist)); - padata_stop(pinst); free_cpumask_var(pinst->rcpumask.cbcpu); free_cpumask_var(pinst->rcpumask.pcpu); free_cpumask_var(pinst->cpumask.pcpu); From cec00e6e120f4f0743d2bf0638418684c74b16e0 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:53 -0400 Subject: [PATCH 125/157] padata: inline single call of pd_setup_cpumasks() pd_setup_cpumasks() has only one caller. Move its contents inline to prepare for the next cleanup. Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- kernel/padata.c | 32 +++++++++----------------------- 1 file changed, 9 insertions(+), 23 deletions(-) diff --git a/kernel/padata.c b/kernel/padata.c index 8f55e717ba50..27f90a3c4dc6 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -441,28 +441,6 @@ static int padata_setup_cpumasks(struct padata_instance *pinst) return err; } -static int pd_setup_cpumasks(struct parallel_data *pd, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask) -{ - int err = -ENOMEM; - - if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) - goto out; - if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) - goto free_pcpu_mask; - - cpumask_copy(pd->cpumask.pcpu, pcpumask); - cpumask_copy(pd->cpumask.cbcpu, cbcpumask); - - return 0; - -free_pcpu_mask: - free_cpumask_var(pd->cpumask.pcpu); -out: - return err; -} - static void __init padata_mt_helper(struct work_struct *w) { struct padata_work *pw = container_of(w, struct padata_work, pw_work); @@ -613,8 +591,14 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) goto err_free_pqueue; pd->ps = ps; - if (pd_setup_cpumasks(pd, pcpumask, cbcpumask)) + + if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) goto err_free_squeue; + if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) + goto err_free_pcpu; + + cpumask_copy(pd->cpumask.pcpu, pcpumask); + cpumask_copy(pd->cpumask.cbcpu, cbcpumask); padata_init_pqueues(pd); padata_init_squeues(pd); @@ -626,6 +610,8 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) return pd; +err_free_pcpu: + free_cpumask_var(pd->cpumask.pcpu); err_free_squeue: free_percpu(pd->squeue); err_free_pqueue: From d69e037bcc4a7e31fdd40ae416aa1bd768dd7d99 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:54 -0400 Subject: [PATCH 126/157] padata: remove effective cpumasks from the instance A padata instance has effective cpumasks that store the user-supplied masks ANDed with the online mask, but this middleman is unnecessary. parallel_data keeps the same information around. Removing this saves text and code churn in future changes. 
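Schematically, the replacement happens where parallel_data is allocated: instead of copying a precomputed pinst->rcpumask, padata_alloc_pd() now does the AND itself. A simplified before/after sketch of that one step, condensed from the diff below for illustration:

	/* before: copy the instance's cached effective mask */
	cpumask_copy(pd->cpumask.pcpu, pinst->rcpumask.pcpu);

	/* after: derive it on the spot, so pinst->rcpumask can go away */
	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);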
Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- include/linux/padata.h | 2 -- kernel/padata.c | 30 +++--------------------------- 2 files changed, 3 insertions(+), 29 deletions(-) diff --git a/include/linux/padata.h b/include/linux/padata.h index 7d53208b43da..a941b96b7119 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -167,7 +167,6 @@ struct padata_mt_job { * @serial_wq: The workqueue used for serial work. * @pslist: List of padata_shell objects attached to this instance. * @cpumask: User supplied cpumasks for parallel and serial works. - * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask. * @kobj: padata instance kernel object. * @lock: padata instance lock. * @flags: padata flags. @@ -179,7 +178,6 @@ struct padata_instance { struct workqueue_struct *serial_wq; struct list_head pslist; struct padata_cpumask cpumask; - struct padata_cpumask rcpumask; struct kobject kobj; struct mutex lock; u8 flags; diff --git a/kernel/padata.c b/kernel/padata.c index 27f90a3c4dc6..4f0a57e5738c 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -571,13 +571,8 @@ static void padata_init_pqueues(struct parallel_data *pd) static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) { struct padata_instance *pinst = ps->pinst; - const struct cpumask *cbcpumask; - const struct cpumask *pcpumask; struct parallel_data *pd; - cbcpumask = pinst->rcpumask.cbcpu; - pcpumask = pinst->rcpumask.pcpu; - pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); if (!pd) goto err; @@ -597,8 +592,8 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) goto err_free_pcpu; - cpumask_copy(pd->cpumask.pcpu, pcpumask); - cpumask_copy(pd->cpumask.cbcpu, cbcpumask); + cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask); + cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask); padata_init_pqueues(pd); padata_init_squeues(pd); @@ -668,12 +663,6 @@ static int padata_replace(struct padata_instance *pinst) pinst->flags |= PADATA_RESET; - cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu, - cpu_online_mask); - - cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu, - cpu_online_mask); - list_for_each_entry(ps, &pinst->pslist, list) { err = padata_replace_one(ps); if (err) @@ -856,8 +845,6 @@ static void __padata_free(struct padata_instance *pinst) WARN_ON(!list_empty(&pinst->pslist)); - free_cpumask_var(pinst->rcpumask.cbcpu); - free_cpumask_var(pinst->rcpumask.pcpu); free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); destroy_workqueue(pinst->serial_wq); @@ -1033,20 +1020,13 @@ static struct padata_instance *padata_alloc(const char *name, !padata_validate_cpumask(pinst, cbcpumask)) goto err_free_masks; - if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL)) - goto err_free_masks; - if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL)) - goto err_free_rcpumask_pcpu; - INIT_LIST_HEAD(&pinst->pslist); cpumask_copy(pinst->cpumask.pcpu, pcpumask); cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); - cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask); - cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask); if (padata_setup_cpumasks(pinst)) - goto err_free_rcpumask_cbcpu; + goto err_free_masks; __padata_start(pinst); @@ -1064,10 +1044,6 @@ static struct padata_instance *padata_alloc(const char *name, return pinst; 
-err_free_rcpumask_cbcpu: - free_cpumask_var(pinst->rcpumask.cbcpu); -err_free_rcpumask_pcpu: - free_cpumask_var(pinst->rcpumask.pcpu); err_free_masks: free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); From 3f257191d31d5eaf154ebdb696efc238837ddd51 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:55 -0400 Subject: [PATCH 127/157] padata: fold padata_alloc_possible() into padata_alloc() There's no reason to have two interfaces when there's only one caller. Removing _possible saves text and simplifies future changes. Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- Documentation/core-api/padata.rst | 2 +- crypto/pcrypt.c | 2 +- include/linux/padata.h | 2 +- kernel/padata.c | 33 +++++-------------------------- 4 files changed, 8 insertions(+), 31 deletions(-) diff --git a/Documentation/core-api/padata.rst b/Documentation/core-api/padata.rst index 771d50330e5b..35175710b43c 100644 --- a/Documentation/core-api/padata.rst +++ b/Documentation/core-api/padata.rst @@ -27,7 +27,7 @@ padata_instance structure for overall control of how jobs are to be run:: #include - struct padata_instance *padata_alloc_possible(const char *name); + struct padata_instance *padata_alloc(const char *name); 'name' simply identifies the instance. diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 2d4ac9d44902..d569c7ed6c80 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -316,7 +316,7 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) { int ret = -ENOMEM; - *pinst = padata_alloc_possible(name); + *pinst = padata_alloc(name); if (!*pinst) return ret; diff --git a/include/linux/padata.h b/include/linux/padata.h index a941b96b7119..070a7d43e8af 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -192,7 +192,7 @@ extern void __init padata_init(void); static inline void __init padata_init(void) {} #endif -extern struct padata_instance *padata_alloc_possible(const char *name); +extern struct padata_instance *padata_alloc(const char *name); extern void padata_free(struct padata_instance *pinst); extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst); extern void padata_free_shell(struct padata_shell *ps); diff --git a/kernel/padata.c b/kernel/padata.c index 4f0a57e5738c..1c0b97891edb 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -979,18 +979,12 @@ static struct kobj_type padata_attr_type = { }; /** - * padata_alloc - allocate and initialize a padata instance and specify - * cpumasks for serial and parallel workers. 
- * + * padata_alloc - allocate and initialize a padata instance * @name: used to identify the instance - * @pcpumask: cpumask that will be used for padata parallelization - * @cbcpumask: cpumask that will be used for padata serialization * * Return: new instance on success, NULL on error */ -static struct padata_instance *padata_alloc(const char *name, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask) +struct padata_instance *padata_alloc(const char *name) { struct padata_instance *pinst; @@ -1016,14 +1010,11 @@ static struct padata_instance *padata_alloc(const char *name, free_cpumask_var(pinst->cpumask.pcpu); goto err_free_serial_wq; } - if (!padata_validate_cpumask(pinst, pcpumask) || - !padata_validate_cpumask(pinst, cbcpumask)) - goto err_free_masks; INIT_LIST_HEAD(&pinst->pslist); - cpumask_copy(pinst->cpumask.pcpu, pcpumask); - cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); + cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask); + cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask); if (padata_setup_cpumasks(pinst)) goto err_free_masks; @@ -1057,21 +1048,7 @@ err_free_inst: err: return NULL; } - -/** - * padata_alloc_possible - Allocate and initialize padata instance. - * Use the cpu_possible_mask for serial and - * parallel workers. - * - * @name: used to identify the instance - * - * Return: new instance on success, NULL on error - */ -struct padata_instance *padata_alloc_possible(const char *name) -{ - return padata_alloc(name, cpu_possible_mask, cpu_possible_mask); -} -EXPORT_SYMBOL(padata_alloc_possible); +EXPORT_SYMBOL(padata_alloc); /** * padata_free - free a padata instance From f601c725a6ac072608f47083e7c8115a3f504f95 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:56 -0400 Subject: [PATCH 128/157] padata: remove padata_parallel_queue Only its reorder field is actually used now, so remove the struct and embed @reorder directly in parallel_data. No functional change, just a cleanup. Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- include/linux/padata.h | 15 ++------------ kernel/padata.c | 46 ++++++++++++++++++------------------------ 2 files changed, 22 insertions(+), 39 deletions(-) diff --git a/include/linux/padata.h b/include/linux/padata.h index 070a7d43e8af..a433f13fc4bf 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -66,17 +66,6 @@ struct padata_serial_queue { struct parallel_data *pd; }; -/** - * struct padata_parallel_queue - The percpu padata parallel queue - * - * @reorder: List to wait for reordering after parallel processing. - * @num_obj: Number of objects that are processed by this cpu. - */ -struct padata_parallel_queue { - struct padata_list reorder; - atomic_t num_obj; -}; - /** * struct padata_cpumask - The cpumasks for the parallel/serial workers * @@ -93,7 +82,7 @@ struct padata_cpumask { * that depends on the cpumask in use. * * @ps: padata_shell object. - * @pqueue: percpu padata queues used for parallelization. + * @reorder_list: percpu reorder lists * @squeue: percpu padata queues used for serialuzation. * @refcnt: Number of objects holding a reference on this parallel_data. * @seq_nr: Sequence number of the parallelized data object. 
@@ -105,7 +94,7 @@ struct padata_cpumask { */ struct parallel_data { struct padata_shell *ps; - struct padata_parallel_queue __percpu *pqueue; + struct padata_list __percpu *reorder_list; struct padata_serial_queue __percpu *squeue; atomic_t refcnt; unsigned int seq_nr; diff --git a/kernel/padata.c b/kernel/padata.c index 1c0b97891edb..16cb894dc272 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -250,13 +250,11 @@ EXPORT_SYMBOL(padata_do_parallel); static struct padata_priv *padata_find_next(struct parallel_data *pd, bool remove_object) { - struct padata_parallel_queue *next_queue; struct padata_priv *padata; struct padata_list *reorder; int cpu = pd->cpu; - next_queue = per_cpu_ptr(pd->pqueue, cpu); - reorder = &next_queue->reorder; + reorder = per_cpu_ptr(pd->reorder_list, cpu); spin_lock(&reorder->lock); if (list_empty(&reorder->list)) { @@ -291,7 +289,7 @@ static void padata_reorder(struct parallel_data *pd) int cb_cpu; struct padata_priv *padata; struct padata_serial_queue *squeue; - struct padata_parallel_queue *next_queue; + struct padata_list *reorder; /* * We need to ensure that only one cpu can work on dequeueing of @@ -339,9 +337,8 @@ static void padata_reorder(struct parallel_data *pd) */ smp_mb(); - next_queue = per_cpu_ptr(pd->pqueue, pd->cpu); - if (!list_empty(&next_queue->reorder.list) && - padata_find_next(pd, false)) + reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); + if (!list_empty(&reorder->list) && padata_find_next(pd, false)) queue_work(pinst->serial_wq, &pd->reorder_work); } @@ -401,17 +398,16 @@ void padata_do_serial(struct padata_priv *padata) { struct parallel_data *pd = padata->pd; int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr); - struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue, - hashed_cpu); + struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); struct padata_priv *cur; - spin_lock(&pqueue->reorder.lock); + spin_lock(&reorder->lock); /* Sort in ascending order of sequence number. 
*/
-	list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
+	list_for_each_entry_reverse(cur, &reorder->list, list)
 		if (cur->seq_nr < padata->seq_nr)
 			break;
 	list_add(&padata->list, &cur->list);
-	spin_unlock(&pqueue->reorder.lock);
+	spin_unlock(&reorder->lock);

 	/*
 	 * Ensure the addition to the reorder list is ordered correctly
@@ -553,17 +549,15 @@ static void padata_init_squeues(struct parallel_data *pd)
 	}
 }

-/* Initialize all percpu queues used by parallel workers */
-static void padata_init_pqueues(struct parallel_data *pd)
+/* Initialize per-CPU reorder lists */
+static void padata_init_reorder_list(struct parallel_data *pd)
 {
 	int cpu;
-	struct padata_parallel_queue *pqueue;
+	struct padata_list *list;

 	for_each_cpu(cpu, pd->cpumask.pcpu) {
-		pqueue = per_cpu_ptr(pd->pqueue, cpu);
-
-		__padata_list_init(&pqueue->reorder);
-		atomic_set(&pqueue->num_obj, 0);
+		list = per_cpu_ptr(pd->reorder_list, cpu);
+		__padata_list_init(list);
 	}
 }

@@ -577,13 +571,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
 	if (!pd)
 		goto err;

-	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
-	if (!pd->pqueue)
+	pd->reorder_list = alloc_percpu(struct padata_list);
+	if (!pd->reorder_list)
 		goto err_free_pd;

 	pd->squeue = alloc_percpu(struct padata_serial_queue);
 	if (!pd->squeue)
-		goto err_free_pqueue;
+		goto err_free_reorder_list;

 	pd->ps = ps;

@@ -595,7 +589,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
 	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
 	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

-	padata_init_pqueues(pd);
+	padata_init_reorder_list(pd);
 	padata_init_squeues(pd);
 	pd->seq_nr = -1;
 	atomic_set(&pd->refcnt, 1);
@@ -609,8 +603,8 @@ err_free_pcpu:
 	free_cpumask_var(pd->cpumask.pcpu);
 err_free_squeue:
 	free_percpu(pd->squeue);
-err_free_pqueue:
-	free_percpu(pd->pqueue);
+err_free_reorder_list:
+	free_percpu(pd->reorder_list);
 err_free_pd:
 	kfree(pd);
 err:
@@ -621,7 +615,7 @@ static void padata_free_pd(struct parallel_data *pd)
 {
 	free_cpumask_var(pd->cpumask.pcpu);
 	free_cpumask_var(pd->cpumask.cbcpu);
-	free_percpu(pd->pqueue);
+	free_percpu(pd->reorder_list);
 	free_percpu(pd->squeue);
 	kfree(pd);
 }

From b526ee38d1799dffd56038faec877df506519914 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Horia=20Geant=C4=83?=
Date: Wed, 15 Jul 2020 18:26:00 +0300
Subject: [PATCH 129/157] dt-bindings: rng: add RNGB compatibles for i.MX6
 SoCs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The RNGB block is found in some i.MX6 SoCs - 6SL, 6SLL, 6ULL, 6ULZ. Add
the corresponding compatible strings.

Note: several NXP SoCs from the QorIQ family (P1010, P1023, P4080, P3041,
P5020) also have an RNGB, however it's part of the CAAM (Cryptographic
Accelerator and Assurance Module) crypto accelerator. In this case, the
RNGB is managed in the caam driver (drivers/crypto/caam/), since it's
tightly related to the caam "job ring" interface, not to mention that
CAAM internally relies on the RNGB as a source of randomness.

On the other hand, the i.MX6 SoCs with an RNGB have a DCP (Data
Co-Processor) crypto accelerator, and the two blocks are independent.
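No driver change is needed for these strings to probe: OF matching accepts a node if any entry in its compatible list is known to the driver, so a node listing the new, more specific string first still binds through the "fsl,imx25-rngb" fallback. An illustrative sketch of such a match table (hypothetical, not the actual imx-rngc one):

	/* binds a node with: compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb"; */
	static const struct of_device_id rngb_dt_ids[] = {
		{ .compatible = "fsl,imx25-rngb" },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, rngb_dt_ids);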
Signed-off-by: Horia Geantă
Reviewed-by: Rob Herring
Signed-off-by: Herbert Xu
---
 Documentation/devicetree/bindings/rng/imx-rng.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Documentation/devicetree/bindings/rng/imx-rng.txt b/Documentation/devicetree/bindings/rng/imx-rng.txt
index 405c2b00ccb0..659d4efdd664 100644
--- a/Documentation/devicetree/bindings/rng/imx-rng.txt
+++ b/Documentation/devicetree/bindings/rng/imx-rng.txt
@@ -5,6 +5,9 @@ Required properties:
                "fsl,imx21-rnga"
                "fsl,imx31-rnga" (backward compatible with "fsl,imx21-rnga")
                "fsl,imx25-rngb"
+               "fsl,imx6sl-rngb" (backward compatible with "fsl,imx25-rngb")
+               "fsl,imx6sll-rngb" (backward compatible with "fsl,imx25-rngb")
+               "fsl,imx6ull-rngb" (backward compatible with "fsl,imx25-rngb")
                "fsl,imx35-rngc"
 - reg : offset and length of the register set of this block
 - interrupts : the interrupt number for the RNG block

From d1e83158738f57027e22f43d8b3f4e58f3373814 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Horia=20Geant=C4=83?=
Date: Wed, 15 Jul 2020 18:26:04 +0300
Subject: [PATCH 130/157] hwrng: imx-rngc - enable driver for i.MX6
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

i.MX6 SL, SLL, ULL, ULZ SoCs have an RNGB block. Since the imx-rngc
driver also supports RNGB, let's enable it for these SoCs too.

Signed-off-by: Horia Geantă
Reviewed-by: Martin Kaiser
Reviewed-by: Marco Felsch
Signed-off-by: Herbert Xu
---
 drivers/char/hw_random/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8478eb757f3c..98f95a09ce55 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -255,7 +255,7 @@ config HW_RANDOM_MXC_RNGA
 config HW_RANDOM_IMX_RNGC
 	tristate "Freescale i.MX RNGC Random Number Generator"
 	depends on HAS_IOMEM && HAVE_CLK
-	depends on SOC_IMX25 || COMPILE_TEST
+	depends on SOC_IMX25 || SOC_IMX6SL || SOC_IMX6SLL || SOC_IMX6UL || COMPILE_TEST
 	default HW_RANDOM
 	help
 	  This driver provides kernel-side support for the Random Number

From ac855b3c1069dc7c6c1442e743600408ec474c36 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Thu, 16 Jul 2020 18:06:56 +1000
Subject: [PATCH 131/157] crypto: omap-aes - Fix sparse and compiler warnings

This patch fixes all the sparse and W=1 compiler warnings in the driver.
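The sparse part of these warnings is about endianness annotations: sparse treats __le32 and plain u32 as distinct __bitwise types, so mixing them needs either a typed conversion or an explicit __force cast, which is what the key-loading hunk below adds; the W=1 part is printk format mismatches, where a size_t wants %zu. A standalone illustration of the sparse rule (not taken from the driver):

	__le32 wire = cpu_to_le32(host_val);	/* typed conversion: clean */
	u32 bits = (__force u32)wire;		/* deliberate reinterpretation: clean */
	u32 bad = wire;				/* sparse: incorrect type in assignment */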
Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 25154b74dcc6..4fd14d90cc40 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -139,7 +139,7 @@ int omap_aes_write_ctrl(struct omap_aes_dev *dd) for (i = 0; i < key32; i++) { omap_aes_write(dd, AES_REG_KEY(dd, i), - __le32_to_cpu(dd->ctx->key[i])); + (__force u32)cpu_to_le32(dd->ctx->key[i])); } if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv) @@ -363,7 +363,7 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) { int err; - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zu\n", dd->total); if (!dd->pio_only) { err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, @@ -409,7 +409,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) { - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zu\n", dd->total); omap_aes_dma_stop(dd); From da087a4cdcbf93c8a6a9decab4a34c38e6c7e867 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:35:33 -0700 Subject: [PATCH 132/157] crypto: hash - drop duplicated word in a comment Drop the doubled word "in" in a comment. Signed-off-by: Randy Dunlap Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Acked-by: David S. Miller Signed-off-by: Herbert Xu --- include/crypto/hash.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 4829d2367eda..19ce91f2359f 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -687,7 +687,7 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, * The message digest API is able to maintain state information for the * caller. * - * The synchronous message digest API can store user-related context in in its + * The synchronous message digest API can store user-related context in its * shash_desc request data structure. */ From dd3240a28c08f8092386c76951408966bf4005ff Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:35:49 -0700 Subject: [PATCH 133/157] crypto: skcipher - drop duplicated word in kernel-doc Drop the doubled word "request" in a kernel-doc comment. Signed-off-by: Randy Dunlap Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Acked-by: David S. Miller Signed-off-by: Herbert Xu --- include/crypto/skcipher.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 141e7690f9c3..5663f71198b3 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -18,7 +18,7 @@ * @iv: Initialisation Vector * @src: Source SG list * @dst: Destination SG list - * @base: Underlying async request request + * @base: Underlying async request * @__ctx: Start of private context data */ struct skcipher_request { From 9332a9e73918bd0a1d5ef40a3357931b9fe0cf8a Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sun, 19 Jul 2020 18:49:59 +0200 Subject: [PATCH 134/157] crypto: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. 
Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Herbert Xu --- Documentation/crypto/api-intro.txt | 2 +- Documentation/crypto/userspace-if.rst | 4 +-- arch/arm/crypto/crc32-ce-core.S | 2 +- arch/arm/crypto/sha1-armv4-large.S | 2 +- arch/arm/crypto/sha256-armv4.pl | 2 +- arch/arm/crypto/sha256-core.S_shipped | 2 +- arch/arm/crypto/sha512-armv4.pl | 4 +-- arch/arm/crypto/sha512-core.S_shipped | 4 +-- crypto/Kconfig | 46 +++++++++++++-------------- crypto/blake2b_generic.c | 2 +- crypto/camellia_generic.c | 2 +- crypto/ecc.c | 2 +- crypto/jitterentropy.c | 4 +-- crypto/lrw.c | 2 +- crypto/salsa20_generic.c | 4 +-- crypto/sha3_generic.c | 2 +- 16 files changed, 43 insertions(+), 43 deletions(-) diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt index 45d943fcae5b..40137f93e04f 100644 --- a/Documentation/crypto/api-intro.txt +++ b/Documentation/crypto/api-intro.txt @@ -169,7 +169,7 @@ Portions of this API were derived from the following projects: and; - Nettle (http://www.lysator.liu.se/~nisse/nettle/) + Nettle (https://www.lysator.liu.se/~nisse/nettle/) Niels Möller Original developers of the crypto algorithms: diff --git a/Documentation/crypto/userspace-if.rst b/Documentation/crypto/userspace-if.rst index ff86befa61e0..52019e905900 100644 --- a/Documentation/crypto/userspace-if.rst +++ b/Documentation/crypto/userspace-if.rst @@ -23,7 +23,7 @@ user space, however. This includes the difference between synchronous and asynchronous invocations. The user space API call is fully synchronous. -[1] http://www.chronox.de/libkcapi.html +[1] https://www.chronox.de/libkcapi.html User Space API General Remarks ------------------------------ @@ -384,4 +384,4 @@ Please see [1] for libkcapi which provides an easy-to-use wrapper around the aforementioned Netlink kernel interface. [1] also contains a test application that invokes all libkcapi API calls. -[1] http://www.chronox.de/libkcapi.html +[1] https://www.chronox.de/libkcapi.html diff --git a/arch/arm/crypto/crc32-ce-core.S b/arch/arm/crypto/crc32-ce-core.S index 5cbd4a6fedad..3f13a76b9066 100644 --- a/arch/arm/crypto/crc32-ce-core.S +++ b/arch/arm/crypto/crc32-ce-core.S @@ -39,7 +39,7 @@ * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE) * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found * at: - * http://www.intel.com/products/processor/manuals/ + * https://www.intel.com/products/processor/manuals/ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual * Volume 2B: Instruction Set Reference, N-Z * diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S index f82cd8cf5a09..1c8b685149f2 100644 --- a/arch/arm/crypto/sha1-armv4-large.S +++ b/arch/arm/crypto/sha1-armv4-large.S @@ -13,7 +13,7 @@ @ Written by Andy Polyakov for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. +@ details see https://www.openssl.org/~appro/cryptogams/. @ ==================================================================== @ sha1_block procedure for ARMv4. 
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl index a03cf4dfb781..9f96ff48e4a8 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/crypto/sha256-armv4.pl @@ -13,7 +13,7 @@ # Written by Andy Polyakov for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. +# details see https://www.openssl.org/~appro/cryptogams/. # ==================================================================== # SHA256 block procedure for ARMv4. May 2007. diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped index 054aae0edfce..ea04b2ab0c33 100644 --- a/arch/arm/crypto/sha256-core.S_shipped +++ b/arch/arm/crypto/sha256-core.S_shipped @@ -12,7 +12,7 @@ @ Written by Andy Polyakov for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. +@ details see https://www.openssl.org/~appro/cryptogams/. @ ==================================================================== @ SHA256 block procedure for ARMv4. May 2007. diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl index 788c17b56ecc..69df68981acd 100644 --- a/arch/arm/crypto/sha512-armv4.pl +++ b/arch/arm/crypto/sha512-armv4.pl @@ -13,7 +13,7 @@ # Written by Andy Polyakov for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. +# details see https://www.openssl.org/~appro/cryptogams/. # ==================================================================== # SHA512 block procedure for ARMv4. September 2007. @@ -43,7 +43,7 @@ # terms it's 22.6 cycles per byte, which is disappointing result. # Technical writers asserted that 3-way S4 pipeline can sustain # multiple NEON instructions per cycle, but dual NEON issue could -# not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html +# not be observed, see https://www.openssl.org/~appro/Snapdragon-S4.html # for further details. On side note Cortex-A15 processes one byte in # 16 cycles. diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped index 710ea309769e..cb147db5cbfe 100644 --- a/arch/arm/crypto/sha512-core.S_shipped +++ b/arch/arm/crypto/sha512-core.S_shipped @@ -12,7 +12,7 @@ @ Written by Andy Polyakov for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. +@ details see https://www.openssl.org/~appro/cryptogams/. @ ==================================================================== @ SHA512 block procedure for ARMv4. September 2007. @@ -42,7 +42,7 @@ @ terms it's 22.6 cycles per byte, which is disappointing result. @ Technical writers asserted that 3-way S4 pipeline can sustain @ multiple NEON instructions per cycle, but dual NEON issue could -@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html +@ not be observed, see https://www.openssl.org/~appro/Snapdragon-S4.html @ for further details. On side note Cortex-A15 processes one byte in @ 16 cycles. 
diff --git a/crypto/Kconfig b/crypto/Kconfig index 091c0a0bbf26..1b57419fa2e7 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -548,7 +548,7 @@ config CRYPTO_XCBC select CRYPTO_MANAGER help XCBC: Keyed-Hashing with encryption algorithm - http://www.ietf.org/rfc/rfc3566.txt + https://www.ietf.org/rfc/rfc3566.txt http://csrc.nist.gov/encryption/modes/proposedmodes/ xcbc-mac/xcbc-mac-spec.pdf @@ -561,7 +561,7 @@ config CRYPTO_VMAC very high speed on 64-bit architectures. See also: - + comment "Digest" @@ -816,7 +816,7 @@ config CRYPTO_RMD128 RIPEMD-160 should be used. Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See + See config CRYPTO_RMD160 tristate "RIPEMD-160 digest algorithm" @@ -833,7 +833,7 @@ config CRYPTO_RMD160 against RIPEMD-160. Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See + See config CRYPTO_RMD256 tristate "RIPEMD-256 digest algorithm" @@ -845,7 +845,7 @@ config CRYPTO_RMD256 (than RIPEMD-128). Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See + See config CRYPTO_RMD320 tristate "RIPEMD-320 digest algorithm" @@ -857,7 +857,7 @@ config CRYPTO_RMD320 (than RIPEMD-160). Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See + See config CRYPTO_SHA1 tristate "SHA1 digest algorithm" @@ -1045,7 +1045,7 @@ config CRYPTO_TGR192 Tiger was developed by Ross Anderson and Eli Biham. See also: - . + . config CRYPTO_WP512 tristate "Whirlpool digest algorithms" @@ -1221,7 +1221,7 @@ config CRYPTO_BLOWFISH designed for use on "large microprocessors". See also: - + config CRYPTO_BLOWFISH_COMMON tristate @@ -1230,7 +1230,7 @@ config CRYPTO_BLOWFISH_COMMON generic c and the assembler implementations. See also: - + config CRYPTO_BLOWFISH_X86_64 tristate "Blowfish cipher algorithm (x86_64)" @@ -1245,7 +1245,7 @@ config CRYPTO_BLOWFISH_X86_64 designed for use on "large microprocessors". See also: - + config CRYPTO_CAMELLIA tristate "Camellia cipher algorithms" @@ -1441,10 +1441,10 @@ config CRYPTO_SALSA20 Salsa20 stream cipher algorithm. Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT - Stream Cipher Project. See + Stream Cipher Project. See The Salsa20 stream cipher algorithm is designed by Daniel J. - Bernstein . See + Bernstein . See config CRYPTO_CHACHA20 tristate "ChaCha stream cipher algorithms" @@ -1456,7 +1456,7 @@ config CRYPTO_CHACHA20 ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. Bernstein and further specified in RFC7539 for use in IETF protocols. This is the portable C implementation of ChaCha20. See also: - + XChaCha20 is the application of the XSalsa20 construction to ChaCha20 rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length @@ -1509,7 +1509,7 @@ config CRYPTO_SERPENT variant of Serpent for compatibility with old kerneli.org code. See also: - + config CRYPTO_SERPENT_SSE2_X86_64 tristate "Serpent cipher algorithm (x86_64/SSE2)" @@ -1528,7 +1528,7 @@ config CRYPTO_SERPENT_SSE2_X86_64 blocks parallel using SSE2 instruction set. See also: - + config CRYPTO_SERPENT_SSE2_586 tristate "Serpent cipher algorithm (i586/SSE2)" @@ -1547,7 +1547,7 @@ config CRYPTO_SERPENT_SSE2_586 blocks parallel using SSE2 instruction set. See also: - + config CRYPTO_SERPENT_AVX_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX)" @@ -1567,7 +1567,7 @@ config CRYPTO_SERPENT_AVX_X86_64 eight blocks parallel using the AVX instruction set. 
See also: - + config CRYPTO_SERPENT_AVX2_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX2)" @@ -1583,7 +1583,7 @@ config CRYPTO_SERPENT_AVX2_X86_64 blocks parallel using AVX2 instruction set. See also: - + config CRYPTO_SM4 tristate "SM4 cipher algorithm" @@ -1640,7 +1640,7 @@ config CRYPTO_TWOFISH bits. See also: - + config CRYPTO_TWOFISH_COMMON tristate @@ -1662,7 +1662,7 @@ config CRYPTO_TWOFISH_586 bits. See also: - + config CRYPTO_TWOFISH_X86_64 tristate "Twofish cipher algorithm (x86_64)" @@ -1678,7 +1678,7 @@ config CRYPTO_TWOFISH_X86_64 bits. See also: - + config CRYPTO_TWOFISH_X86_64_3WAY tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" @@ -1699,7 +1699,7 @@ config CRYPTO_TWOFISH_X86_64_3WAY blocks parallel, utilizing resources of out-of-order CPUs better. See also: - + config CRYPTO_TWOFISH_AVX_X86_64 tristate "Twofish cipher algorithm (x86_64/AVX)" @@ -1722,7 +1722,7 @@ config CRYPTO_TWOFISH_AVX_X86_64 eight blocks parallel using the AVX Instruction Set. See also: - + comment "Compression" diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index 0ffd8d92e308..a2ffe60e06d3 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -8,7 +8,7 @@ * * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 * - OpenSSL license : https://www.openssl.org/source/license.html - * - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 + * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 * * More information about the BLAKE2 hash function can be found at * https://blake2.net. diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index 9a5783e5196a..0b9f409f7370 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -6,7 +6,7 @@ /* * Algorithm Specification - * http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html + * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html */ /* diff --git a/crypto/ecc.c b/crypto/ecc.c index 02d35be7702b..86c324936a2b 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -940,7 +940,7 @@ static bool ecc_point_is_zero(const struct ecc_point *point) } /* Point multiplication algorithm using Montgomery's ladder with co-Z - * coordinates. From http://eprint.iacr.org/2011/338.pdf + * coordinates. 
From https://eprint.iacr.org/2011/338.pdf */ /* Double in place */ diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 57f4a1ac738b..6e147c43fc18 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -7,7 +7,7 @@ * Design * ====== * - * See http://www.chronox.de/jent.html + * See https://www.chronox.de/jent.html * * License * ======= @@ -47,7 +47,7 @@ /* * This Jitterentropy RNG is based on the jitterentropy library - * version 2.2.0 provided at http://www.chronox.de/jent.html + * version 2.2.0 provided at https://www.chronox.de/jent.html */ #ifdef __OPTIMIZE__ diff --git a/crypto/lrw.c b/crypto/lrw.c index 3f90a5ec28f7..bcf09fbc750a 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -9,7 +9,7 @@ */ /* This implementation is checked against the test vectors in the above * document and by a test vector provided by Ken Buchanan at - * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html + * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html * * The test vectors are included in the testing module tcrypt.[ch] */ diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index c81a44404086..3418869dabef 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -9,8 +9,8 @@ * Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream * Cipher Project. It is designed by Daniel J. Bernstein . * More information about eSTREAM and Salsa20 can be found here: - * http://www.ecrypt.eu.org/stream/ - * http://cr.yp.to/snuffle.html + * https://www.ecrypt.eu.org/stream/ + * https://cr.yp.to/snuffle.html * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 44e263e25599..3e4069935b53 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -3,7 +3,7 @@ * Cryptographic API. * * SHA-3, as specified in - * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf + * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf * * SHA-3 code by Jeff Garzik * Ard Biesheuvel From e493b31a5855e40558cd7883572e6ac904121127 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 19 Jul 2020 11:07:50 -0700 Subject: [PATCH 135/157] crypto: testmgr - delete duplicated words Delete the doubled word "from" in multiple places. Signed-off-by: Randy Dunlap Cc: Herbert Xu Cc: "David S. 
Miller" Cc: linux-crypto@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/testmgr.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crypto/testmgr.h b/crypto/testmgr.h index d29983908c38..b9a2d73d9f8d 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -3916,7 +3916,7 @@ static const struct hash_testvec hmac_sm3_tv_template[] = { }; /* - * SHA1 test vectors from from FIPS PUB 180-1 + * SHA1 test vectors from FIPS PUB 180-1 * Long vector from CAVS 5.0 */ static const struct hash_testvec sha1_tv_template[] = { @@ -4103,7 +4103,7 @@ static const struct hash_testvec sha1_tv_template[] = { /* - * SHA224 test vectors from from FIPS PUB 180-2 + * SHA224 test vectors from FIPS PUB 180-2 */ static const struct hash_testvec sha224_tv_template[] = { { @@ -4273,7 +4273,7 @@ static const struct hash_testvec sha224_tv_template[] = { }; /* - * SHA256 test vectors from from NIST + * SHA256 test vectors from NIST */ static const struct hash_testvec sha256_tv_template[] = { { @@ -4442,7 +4442,7 @@ static const struct hash_testvec sha256_tv_template[] = { }; /* - * SHA384 test vectors from from NIST and kerneli + * SHA384 test vectors from NIST and kerneli */ static const struct hash_testvec sha384_tv_template[] = { { @@ -4632,7 +4632,7 @@ static const struct hash_testvec sha384_tv_template[] = { }; /* - * SHA512 test vectors from from NIST and kerneli + * SHA512 test vectors from NIST and kerneli */ static const struct hash_testvec sha512_tv_template[] = { { From ef19f826eceabdef3a710958cbf3549355267645 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 20 Jul 2020 15:51:57 +0200 Subject: [PATCH 136/157] crypto: x86 - Put back integer parts of include/asm/inst.h Resolves conflict with the tip tree. Fixes: d7866e503bdc ("crypto: x86 - Remove include/asm/inst.h") CC: Thomas Gleixner CC: Ingo Molnar CC: Borislav Petkov CC: "H. Peter Anvin" CC: Stephen Rothwell , CC: "Chang S. Bae" , CC: Peter Zijlstra , CC: Sasha Levin Signed-off-by: Uros Bizjak Signed-off-by: Herbert Xu --- arch/x86/include/asm/inst.h | 148 ++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 arch/x86/include/asm/inst.h diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h new file mode 100644 index 000000000000..438ccd4f3cc4 --- /dev/null +++ b/arch/x86/include/asm/inst.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Generate .byte code for some instructions not supported by old + * binutils. 
+ */ +#ifndef X86_ASM_INST_H +#define X86_ASM_INST_H + +#ifdef __ASSEMBLY__ + +#define REG_NUM_INVALID 100 + +#define REG_TYPE_R32 0 +#define REG_TYPE_R64 1 +#define REG_TYPE_INVALID 100 + + .macro R32_NUM opd r32 + \opd = REG_NUM_INVALID + .ifc \r32,%eax + \opd = 0 + .endif + .ifc \r32,%ecx + \opd = 1 + .endif + .ifc \r32,%edx + \opd = 2 + .endif + .ifc \r32,%ebx + \opd = 3 + .endif + .ifc \r32,%esp + \opd = 4 + .endif + .ifc \r32,%ebp + \opd = 5 + .endif + .ifc \r32,%esi + \opd = 6 + .endif + .ifc \r32,%edi + \opd = 7 + .endif +#ifdef CONFIG_X86_64 + .ifc \r32,%r8d + \opd = 8 + .endif + .ifc \r32,%r9d + \opd = 9 + .endif + .ifc \r32,%r10d + \opd = 10 + .endif + .ifc \r32,%r11d + \opd = 11 + .endif + .ifc \r32,%r12d + \opd = 12 + .endif + .ifc \r32,%r13d + \opd = 13 + .endif + .ifc \r32,%r14d + \opd = 14 + .endif + .ifc \r32,%r15d + \opd = 15 + .endif +#endif + .endm + + .macro R64_NUM opd r64 + \opd = REG_NUM_INVALID +#ifdef CONFIG_X86_64 + .ifc \r64,%rax + \opd = 0 + .endif + .ifc \r64,%rcx + \opd = 1 + .endif + .ifc \r64,%rdx + \opd = 2 + .endif + .ifc \r64,%rbx + \opd = 3 + .endif + .ifc \r64,%rsp + \opd = 4 + .endif + .ifc \r64,%rbp + \opd = 5 + .endif + .ifc \r64,%rsi + \opd = 6 + .endif + .ifc \r64,%rdi + \opd = 7 + .endif + .ifc \r64,%r8 + \opd = 8 + .endif + .ifc \r64,%r9 + \opd = 9 + .endif + .ifc \r64,%r10 + \opd = 10 + .endif + .ifc \r64,%r11 + \opd = 11 + .endif + .ifc \r64,%r12 + \opd = 12 + .endif + .ifc \r64,%r13 + \opd = 13 + .endif + .ifc \r64,%r14 + \opd = 14 + .endif + .ifc \r64,%r15 + \opd = 15 + .endif +#endif + .endm + + .macro REG_TYPE type reg + R32_NUM reg_type_r32 \reg + R64_NUM reg_type_r64 \reg + .if reg_type_r64 <> REG_NUM_INVALID + \type = REG_TYPE_R64 + .elseif reg_type_r32 <> REG_NUM_INVALID + \type = REG_TYPE_R32 + .else + \type = REG_TYPE_INVALID + .endif + .endm + + .macro PFX_REX opd1 opd2 W=0 + .if ((\opd1 | \opd2) & 8) || \W + .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3) + .endif + .endm + + .macro MODRM mod opd1 opd2 + .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3) + .endm +#endif + +#endif From e7d2b41e5c773c1e00f0f30519b9790ba7e4a58c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stephan=20M=C3=BCller?= Date: Mon, 20 Jul 2020 19:07:48 +0200 Subject: [PATCH 137/157] crypto: ecdh - check validity of Z before export SP800-56A rev3 section 5.7.1.2 step 2 mandates that the validity of the calculated shared secret is verified before the data is returned to the caller. Thus, the export function and the validity check functions are reversed. In addition, the sensitive variables of priv and rand_z are zeroized. 
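The zeroization detail matters here: a plain memset() on a local buffer that is about to go out of scope is a dead store the compiler may legally elide, which is why memzero_explicit() is used. A minimal sketch of that pattern only (not the crypto_ecdh_shared_secret() code; derive_secret() is an assumed placeholder, not a real API):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static int derive_and_export(u8 *out, const u8 *in, size_t len)
{
	u8 scratch[64];	/* holds sensitive intermediate material */
	int ret;

	if (len > sizeof(scratch))
		return -EINVAL;

	ret = derive_secret(scratch, in, len);	/* assumed helper, illustration only */
	if (!ret)
		memcpy(out, scratch, len);

	/*
	 * A memset() here could be dropped by the optimizer as a dead
	 * store; memzero_explicit() contains a compiler barrier, so the
	 * wipe of the sensitive buffer survives on every return path.
	 */
	memzero_explicit(scratch, sizeof(scratch));
	return ret;
}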
Signed-off-by: Stephan Mueller Reviewed-by: Vitaly Chikunov Acked-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/ecc.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/crypto/ecc.c b/crypto/ecc.c index 86c324936a2b..c8b259e59704 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -1495,11 +1495,16 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, ecc_point_mult(product, pk, priv, rand_z, curve, ndigits); + if (ecc_point_is_zero(product)) { + ret = -EFAULT; + goto err_validity; + } + ecc_swap_digits(product->x, secret, ndigits); - if (ecc_point_is_zero(product)) - ret = -EFAULT; - +err_validity: + memzero_explicit(priv, sizeof(priv)); + memzero_explicit(rand_z, sizeof(rand_z)); ecc_free_point(product); err_alloc_product: ecc_free_point(pk); From 4278e9d99e38938a7611b927fa4d73e6c86cb4fc Mon Sep 17 00:00:00 2001 From: Marcelo Henrique Cerri Date: Mon, 20 Jul 2020 19:08:09 +0200 Subject: [PATCH 138/157] lib/mpi: Add mpi_sub_ui() Add mpi_sub_ui() based on Gnu MP mpz_sub_ui() function from file mpz/aors_ui.h[1] from change id 510b83519d1c adapting the code to the kernel's data structures, helper functions and coding style and also removing the defines used to produce mpz_sub_ui() and mpz_add_ui() from the same code. [1] https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors.h Signed-off-by: Marcelo Henrique Cerri Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- include/linux/mpi.h | 3 ++ lib/mpi/Makefile | 1 + lib/mpi/mpi-sub-ui.c | 78 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+) create mode 100644 lib/mpi/mpi-sub-ui.c diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 7bd6d8af0004..5d906dfbf3ed 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -63,6 +63,9 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); int mpi_cmp_ui(MPI u, ulong v); int mpi_cmp(MPI u, MPI v); +/*-- mpi-sub-ui.c --*/ +int mpi_sub_ui(MPI w, MPI u, unsigned long vval); + /*-- mpi-bit.c --*/ void mpi_normalize(MPI a); unsigned mpi_get_nbits(MPI a); diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile index d5874a7f5ff9..43b8fce14079 100644 --- a/lib/mpi/Makefile +++ b/lib/mpi/Makefile @@ -16,6 +16,7 @@ mpi-y = \ mpicoder.o \ mpi-bit.o \ mpi-cmp.o \ + mpi-sub-ui.o \ mpih-cmp.o \ mpih-div.o \ mpih-mul.o \ diff --git a/lib/mpi/mpi-sub-ui.c b/lib/mpi/mpi-sub-ui.c new file mode 100644 index 000000000000..b41b082b5f3e --- /dev/null +++ b/lib/mpi/mpi-sub-ui.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI. + * + * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015 + * Free Software Foundation, Inc. + * + * This file was based on the GNU MP Library source file: + * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h + * + * The GNU MP Library is free software; you can redistribute it and/or modify + * it under the terms of either: + * + * * the GNU Lesser General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your + * option) any later version. + * + * or + * + * * the GNU General Public License as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any + * later version. + * + * or both in parallel, as here. 
+ * + * The GNU MP Library is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received copies of the GNU General Public License and the + * GNU Lesser General Public License along with the GNU MP Library. If not, + * see https://www.gnu.org/licenses/. + */ + +#include "mpi-internal.h" + +int mpi_sub_ui(MPI w, MPI u, unsigned long vval) +{ + if (u->nlimbs == 0) { + if (mpi_resize(w, 1) < 0) + return -ENOMEM; + w->d[0] = vval; + w->nlimbs = (vval != 0); + w->sign = (vval != 0); + return 0; + } + + /* If not space for W (and possible carry), increase space. */ + if (mpi_resize(w, u->nlimbs + 1)) + return -ENOMEM; + + if (u->sign) { + mpi_limb_t cy; + + cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); + w->d[u->nlimbs] = cy; + w->nlimbs = u->nlimbs + cy; + w->sign = 1; + } else { + /* The signs are different. Need exact comparison to determine + * which operand to subtract from which. + */ + if (u->nlimbs == 1 && u->d[0] < vval) { + w->d[0] = vval - u->d[0]; + w->nlimbs = 1; + w->sign = 1; + } else { + mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); + /* Size can decrease with at most one limb. */ + w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0)); + w->sign = 0; + } + } + + mpi_normalize(w); + return 0; +} +EXPORT_SYMBOL_GPL(mpi_sub_ui); From 90fa9ae51c1f2fa932bfa0a4d19163d49f0c1c46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stephan=20M=C3=BCller?= Date: Mon, 20 Jul 2020 19:08:32 +0200 Subject: [PATCH 139/157] crypto: dh - check validity of Z before export SP800-56A rev3 section 5.7.1.1 step 2 mandates that the validity of the calculated shared secret is verified before the data is returned to the caller. This patch adds the validation check. Signed-off-by: Stephan Mueller Acked-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/dh.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/crypto/dh.c b/crypto/dh.c index 566f624a2de2..f84fd50ec79b 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -9,6 +9,7 @@ #include #include #include +#include #include struct dh_ctx { @@ -179,6 +180,34 @@ static int dh_compute_value(struct kpp_request *req) if (ret) goto err_free_base; + /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */ + if (fips_enabled && req->src) { + MPI pone; + + /* z <= 1 */ + if (mpi_cmp_ui(val, 1) < 1) { + ret = -EBADMSG; + goto err_free_base; + } + + /* z == p - 1 */ + pone = mpi_alloc(0); + + if (!pone) { + ret = -ENOMEM; + goto err_free_base; + } + + ret = mpi_sub_ui(pone, ctx->p, 1); + if (!ret && !mpi_cmp(pone, val)) + ret = -EBADMSG; + + mpi_free(pone); + + if (ret) + goto err_free_base; + } + ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign); if (ret) goto err_free_base; From 2ed5ba61cc78f102656eedc0b4c80fd14a5e8c7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stephan=20M=C3=BCller?= Date: Mon, 20 Jul 2020 19:08:52 +0200 Subject: [PATCH 140/157] crypto: dh - SP800-56A rev 3 local public key validation After the generation of a local public key, SP800-56A rev 3 section 5.6.2.1.3 mandates a validation of that key with a full validation compliant to section 5.6.2.3.1. Only if the full validation passes, the key is allowed to be used. 
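The diff below only calls dh_is_pubkey_valid(), whose body is not part of this hunk. As a rough sketch (not the in-tree implementation), a full FFC public-key validation per SP800-56A 5.6.2.3.1 can be assembled from the MPI helpers used in this series, assuming the subgroup order q is known: check 2 <= y <= p - 2, then check y^q mod p == 1:

#include <linux/errno.h>
#include <linux/mpi.h>

/* Hypothetical helper, for illustration; names do not match crypto/dh.c. */
static int dh_pubkey_valid_full(MPI y, MPI p, MPI q)
{
	MPI tmp = mpi_alloc(0);
	int ret;

	if (!tmp)
		return -ENOMEM;

	/* tmp = p - 1, using the mpi_sub_ui() added earlier in this series */
	ret = mpi_sub_ui(tmp, p, 1);
	if (ret)
		goto out;

	/* Step 1: 2 <= y <= p - 2, i.e. y > 1 and y < p - 1 */
	ret = -EINVAL;
	if (mpi_cmp_ui(y, 1) <= 0 || mpi_cmp(y, tmp) >= 0)
		goto out;

	/* Step 2: y^q mod p == 1 shows y lies in the prime-order subgroup */
	ret = mpi_powm(tmp, y, q, p);
	if (!ret && mpi_cmp_ui(tmp, 1) != 0)
		ret = -EBADMSG;
out:
	mpi_free(tmp);
	return ret;
}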
Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/dh.c | 57 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 24 deletions(-) diff --git a/crypto/dh.c b/crypto/dh.c index f84fd50ec79b..cd4f32092e5c 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -180,32 +180,41 @@ static int dh_compute_value(struct kpp_request *req) if (ret) goto err_free_base; - /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */ - if (fips_enabled && req->src) { - MPI pone; + if (fips_enabled) { + /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */ + if (req->src) { + MPI pone; - /* z <= 1 */ - if (mpi_cmp_ui(val, 1) < 1) { - ret = -EBADMSG; - goto err_free_base; + /* z <= 1 */ + if (mpi_cmp_ui(val, 1) < 1) { + ret = -EBADMSG; + goto err_free_base; + } + + /* z == p - 1 */ + pone = mpi_alloc(0); + + if (!pone) { + ret = -ENOMEM; + goto err_free_base; + } + + ret = mpi_sub_ui(pone, ctx->p, 1); + if (!ret && !mpi_cmp(pone, val)) + ret = -EBADMSG; + + mpi_free(pone); + + if (ret) + goto err_free_base; + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + } else { + if (dh_is_pubkey_valid(ctx, val)) { + ret = -EAGAIN; + goto err_free_val; + } } - - /* z == p - 1 */ - pone = mpi_alloc(0); - - if (!pone) { - ret = -ENOMEM; - goto err_free_base; - } - - ret = mpi_sub_ui(pone, ctx->p, 1); - if (!ret && !mpi_cmp(pone, val)) - ret = -EBADMSG; - - mpi_free(pone); - - if (ret) - goto err_free_base; } ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign); From 6914dd53eb7af7cbc66edf7992d600b1e952c40d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stephan=20M=C3=BCller?= Date: Mon, 20 Jul 2020 19:09:23 +0200 Subject: [PATCH 141/157] crypto: ecc - SP800-56A rev 3 local public key validation After the generation of a local public key, SP800-56A rev 3 section 5.6.2.1.3 mandates a validation of that key with a full validation compliant to section 5.6.2.3.3. Only if the full validation passes, the key is allowed to be used. The patch adds the full key validation compliant to 5.6.2.3.3 and performs the required check on the generated public key. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/ecc.c | 31 ++++++++++++++++++++++++++++++- crypto/ecc.h | 14 ++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/crypto/ecc.c b/crypto/ecc.c index c8b259e59704..8acf8433ca29 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -1404,7 +1404,9 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, } ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits); - if (ecc_point_is_zero(pk)) { + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + if (ecc_is_pubkey_valid_full(curve, pk)) { ret = -EAGAIN; goto err_free_point; } @@ -1452,6 +1454,33 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, } EXPORT_SYMBOL(ecc_is_pubkey_valid_partial); +/* SP800-56A section 5.6.2.3.3 full verification */ +int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, + struct ecc_point *pk) +{ + struct ecc_point *nQ; + + /* Checks 1 through 3 */ + int ret = ecc_is_pubkey_valid_partial(curve, pk); + + if (ret) + return ret; + + /* Check 4: Verify that nQ is the zero point. 
*/ + nQ = ecc_alloc_point(pk->ndigits); + if (!nQ) + return -ENOMEM; + + ecc_point_mult(nQ, pk, curve->n, NULL, curve, pk->ndigits); + if (!ecc_point_is_zero(nQ)) + ret = -EINVAL; + + ecc_free_point(nQ); + + return ret; +} +EXPORT_SYMBOL(ecc_is_pubkey_valid_full); + int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, const u64 *private_key, const u64 *public_key, u64 *secret) diff --git a/crypto/ecc.h b/crypto/ecc.h index ab0eb70b9c09..d4e546b9ad79 100644 --- a/crypto/ecc.h +++ b/crypto/ecc.h @@ -147,6 +147,20 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, struct ecc_point *pk); +/** + * ecc_is_pubkey_valid_full() - Full public key validation + * + * @curve: elliptic curve domain parameters + * @pk: public key as a point + * + * Valdiate public key according to SP800-56A section 5.6.2.3.3 ECC Full + * Public-Key Validation Routine. + * + * Return: 0 if validation is successful, -EINVAL if validation is failed. + */ +int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, + struct ecc_point *pk); + /** * vli_is_zero() - Determine is vli is zero * From c6720415907f21b9c53efbe679b96c3cc9d06404 Mon Sep 17 00:00:00 2001 From: Sven Auhagen Date: Tue, 21 Jul 2020 06:37:59 +0200 Subject: [PATCH 142/157] crypto: inside-secure - irq balance Balance the irqs of the inside secure driver over all available cpus. Currently all interrupts are handled by the first CPU. From my testing with IPSec AES-GCM 256 on my MCbin with 4 Cores I get a 50% speed increase: Before the patch: 99.73 Kpps With the patch: 151.25 Kpps Signed-off-by: Sven Auhagen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 13 +++++++++++-- drivers/crypto/inside-secure/safexcel.h | 3 +++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 2cb53fbae841..fa7398e68858 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1135,11 +1135,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data) static int safexcel_request_ring_irq(void *pdev, int irqid, int is_pci_dev, + int ring_id, irq_handler_t handler, irq_handler_t threaded_handler, struct safexcel_ring_irq_data *ring_irq_priv) { - int ret, irq; + int ret, irq, cpu; struct device *dev; if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) { @@ -1177,6 +1178,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid, return ret; } + /* Set affinity */ + cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE); + irq_set_affinity_hint(irq, get_cpu_mask(cpu)); + return irq; } @@ -1611,6 +1616,7 @@ static int safexcel_probe_generic(void *pdev, irq = safexcel_request_ring_irq(pdev, EIP197_IRQ_NUMBER(i, is_pci_dev), is_pci_dev, + i, safexcel_irq_ring, safexcel_irq_ring_thread, ring_irq); @@ -1619,6 +1625,7 @@ static int safexcel_probe_generic(void *pdev, return irq; } + priv->ring[i].irq = irq; priv->ring[i].work_data.priv = priv; priv->ring[i].work_data.ring = i; INIT_WORK(&priv->ring[i].work_data.work, @@ -1756,8 +1763,10 @@ static int safexcel_remove(struct platform_device *pdev) clk_disable_unprepare(priv->reg_clk); clk_disable_unprepare(priv->clk); - for (i = 0; i < priv->config.rings; i++) + for (i = 0; i < priv->config.rings; i++) { + irq_set_affinity_hint(priv->ring[i].irq, NULL); destroy_workqueue(priv->ring[i].workqueue); + } return 0; } diff --git a/drivers/crypto/inside-secure/safexcel.h 
b/drivers/crypto/inside-secure/safexcel.h index 94016c505abb..7c5fe382d272 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -707,6 +707,9 @@ struct safexcel_ring { */ struct crypto_async_request *req; struct crypto_async_request *backlog; + + /* irq of this ring */ + int irq; }; /* EIP integration context flags */ From 28ee8b0912ca2ff68c2c03ff97bf1c22634c7942 Mon Sep 17 00:00:00 2001 From: Sven Auhagen Date: Tue, 21 Jul 2020 06:40:27 +0200 Subject: [PATCH 143/157] crypto: marvell/cesa - irq balance Balance the irqs of the marvell cesa driver over all available cpus. Currently all interrupts are handled by the first CPU. From my testing with IPSec AES 256 SHA256 on my clearfog base with 2 Cores I get a 2x speed increase: Before the patch: 26.74 Kpps With the patch: 56.11 Kpps Signed-off-by: Sven Auhagen Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa/cesa.c | 11 ++++++++++- drivers/crypto/marvell/cesa/cesa.h | 1 + 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c index 8a5f0b0bdf77..d63bca9718dc 100644 --- a/drivers/crypto/marvell/cesa/cesa.c +++ b/drivers/crypto/marvell/cesa/cesa.c @@ -438,7 +438,7 @@ static int mv_cesa_probe(struct platform_device *pdev) struct mv_cesa_dev *cesa; struct mv_cesa_engine *engines; struct resource *res; - int irq, ret, i; + int irq, ret, i, cpu; u32 sram_size; if (cesa_dev) { @@ -505,6 +505,8 @@ static int mv_cesa_probe(struct platform_device *pdev) goto err_cleanup; } + engine->irq = irq; + /* * Not all platforms can gate the CESA clocks: do not complain * if the clock does not exist. @@ -548,6 +550,10 @@ static int mv_cesa_probe(struct platform_device *pdev) if (ret) goto err_cleanup; + /* Set affinity */ + cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE); + irq_set_affinity_hint(irq, get_cpu_mask(cpu)); + crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN); atomic_set(&engine->load, 0); INIT_LIST_HEAD(&engine->complete_queue); @@ -570,6 +576,8 @@ err_cleanup: clk_disable_unprepare(cesa->engines[i].zclk); clk_disable_unprepare(cesa->engines[i].clk); mv_cesa_put_sram(pdev, i); + if (cesa->engines[i].irq > 0) + irq_set_affinity_hint(cesa->engines[i].irq, NULL); } return ret; @@ -586,6 +594,7 @@ static int mv_cesa_remove(struct platform_device *pdev) clk_disable_unprepare(cesa->engines[i].zclk); clk_disable_unprepare(cesa->engines[i].clk); mv_cesa_put_sram(pdev, i); + irq_set_affinity_hint(cesa->engines[i].irq, NULL); } return 0; diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h index e8632d5f343f..0c9cbb681e49 100644 --- a/drivers/crypto/marvell/cesa/cesa.h +++ b/drivers/crypto/marvell/cesa/cesa.h @@ -457,6 +457,7 @@ struct mv_cesa_engine { atomic_t load; struct mv_cesa_tdma_chain chain; struct list_head complete_queue; + int irq; }; /** From 958ea4e0d64e39d039245e6450f625108833e522 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 21 Jul 2020 09:05:54 +0300 Subject: [PATCH 144/157] crypto: xts - Replace memcpy() invocation with simple assignment Colin reports that the memcpy() call in xts_cts_final() triggers an "Overlapping buffer in memory copy" warning in Coverity, which is a false positive, given that tail is guaranteed to be smaller than or equal to the distance between source and destination.
However, given that any additional bytes that we copy will be ignored anyway, we can simply copy XTS_BLOCK_SIZE unconditionally, which means we can use struct assignment of the array members instead, which is likely to be more efficient as well. Addresses-Coverity: ("Overlapping buffer in memory copy") Fixes: 8083b1bf8163 ("crypto: xts - add support for ciphertext stealing") Reported-by: Colin Ian King Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/xts.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/xts.c b/crypto/xts.c index 3c3ed02c7663..ad45b009774b 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -171,7 +171,7 @@ static int xts_cts_final(struct skcipher_request *req, offset - XTS_BLOCK_SIZE); scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); - memcpy(b + 1, b, tail); + b[1] = b[0]; scatterwalk_map_and_copy(b, req->src, offset, tail, 0); le128_xor(b, &rctx->t, b); From f892a21f51162d2c443c972d450f73ffa7fb49bb Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 22 Jul 2020 15:00:58 +0530 Subject: [PATCH 145/157] crypto: ccp - use generic power management Drivers using legacy power management .suspend()/.resume() callbacks have to manage PCI states and device's PM states themselves. They also need to take care of standard configuration registers. Switch to generic power management framework using a single "struct dev_pm_ops" variable to take the unnecessary load from the driver. This also avoids the need for the driver to directly call most of the PCI helper functions and device power state control functions as through the generic framework, PCI Core takes care of the necessary operations, and drivers are required to do only device-specific jobs. Signed-off-by: Vaibhav Gupta Acked-by: John Allen Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev.c | 4 +--- drivers/crypto/ccp/sp-dev.c | 6 ++---- drivers/crypto/ccp/sp-dev.h | 6 +++--- drivers/crypto/ccp/sp-pci.c | 17 ++++++----------- drivers/crypto/ccp/sp-platform.c | 2 +- 5 files changed, 13 insertions(+), 22 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 19ac509ed76e..0971ee60f840 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -531,7 +531,6 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) return len; } -#ifdef CONFIG_PM bool ccp_queues_suspended(struct ccp_device *ccp) { unsigned int suspended = 0; @@ -549,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp) return ccp->cmd_q_count == suspended; } -int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) +int ccp_dev_suspend(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; unsigned long flags; @@ -601,7 +600,6 @@ int ccp_dev_resume(struct sp_device *sp) return 0; } -#endif int ccp_dev_init(struct sp_device *sp) { diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c index ce42675d3274..6284a15e5047 100644 --- a/drivers/crypto/ccp/sp-dev.c +++ b/drivers/crypto/ccp/sp-dev.c @@ -211,13 +211,12 @@ void sp_destroy(struct sp_device *sp) sp_del_device(sp); } -#ifdef CONFIG_PM -int sp_suspend(struct sp_device *sp, pm_message_t state) +int sp_suspend(struct sp_device *sp) { int ret; if (sp->dev_vdata->ccp_vdata) { - ret = ccp_dev_suspend(sp, state); + ret = ccp_dev_suspend(sp); if (ret) return ret; } @@ -237,7 +236,6 @@ int sp_resume(struct sp_device *sp) return 0; } -#endif struct sp_device *sp_get_psp_master_device(void) { diff --git a/drivers/crypto/ccp/sp-dev.h
b/drivers/crypto/ccp/sp-dev.h index f913f1494af9..0218d0670eee 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -119,7 +119,7 @@ int sp_init(struct sp_device *sp); void sp_destroy(struct sp_device *sp); struct sp_device *sp_get_master(void); -int sp_suspend(struct sp_device *sp, pm_message_t state); +int sp_suspend(struct sp_device *sp); int sp_resume(struct sp_device *sp); int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data); @@ -134,7 +134,7 @@ struct sp_device *sp_get_psp_master_device(void); int ccp_dev_init(struct sp_device *sp); void ccp_dev_destroy(struct sp_device *sp); -int ccp_dev_suspend(struct sp_device *sp, pm_message_t state); +int ccp_dev_suspend(struct sp_device *sp); int ccp_dev_resume(struct sp_device *sp); #else /* !CONFIG_CRYPTO_DEV_SP_CCP */ @@ -145,7 +145,7 @@ static inline int ccp_dev_init(struct sp_device *sp) } static inline void ccp_dev_destroy(struct sp_device *sp) { } -static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) +static inline int ccp_dev_suspend(struct sp_device *sp) { return 0; } diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index cb6cb47053f4..f471dbaef1fb 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -252,23 +252,19 @@ static void sp_pci_remove(struct pci_dev *pdev) sp_free_irqs(sp); } -#ifdef CONFIG_PM -static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused sp_pci_suspend(struct device *dev) { - struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); - return sp_suspend(sp, state); + return sp_suspend(sp); } -static int sp_pci_resume(struct pci_dev *pdev) +static int __maybe_unused sp_pci_resume(struct device *dev) { - struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); return sp_resume(sp); } -#endif #ifdef CONFIG_CRYPTO_DEV_SP_PSP static const struct sev_vdata sevv1 = { @@ -365,15 +361,14 @@ static const struct pci_device_id sp_pci_table[] = { }; MODULE_DEVICE_TABLE(pci, sp_pci_table); +static SIMPLE_DEV_PM_OPS(sp_pci_pm_ops, sp_pci_suspend, sp_pci_resume); + static struct pci_driver sp_pci_driver = { .name = "ccp", .id_table = sp_pci_table, .probe = sp_pci_probe, .remove = sp_pci_remove, -#ifdef CONFIG_PM - .suspend = sp_pci_suspend, - .resume = sp_pci_resume, -#endif + .driver.pm = &sp_pci_pm_ops, }; int sp_pci_init(void) diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index 831aac1393a2..9dba52fbee99 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c @@ -207,7 +207,7 @@ static int sp_platform_suspend(struct platform_device *pdev, struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); - return sp_suspend(sp, state); + return sp_suspend(sp); } static int sp_platform_resume(struct platform_device *pdev) From 81f2288805ca7f00f8f10e249488f0c6c2abf2aa Mon Sep 17 00:00:00 2001 From: Franck LENORMAND Date: Wed, 22 Jul 2020 15:14:52 +0300 Subject: [PATCH 146/157] crypto: caam - remove deadcode on 32-bit platforms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When building on a platform with a 32bit DMA address, taking the upper 32 bits makes no sense. 
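The fix in the diff below works because IS_ENABLED() is a compile-time constant: when CONFIG_ARCH_DMA_ADDR_T_64BIT is unset, dma_addr_t is 32 bits wide, upper_32_bits() is always zero, and the compiler discards the branch entirely. A simplified standalone sketch of the pattern, with the CAAM byte-swapping and i.MX handling left out (illustrative only, not the driver code):

#include <linux/kernel.h>
#include <linux/types.h>

static u64 pack_dma_addr(dma_addr_t value)
{
	/* lower half in the high word, mirroring the CAAM layout */
	u64 ret = (u64)lower_32_bits(value) << 32;

	/*
	 * IS_ENABLED() folds to 0 or 1 at compile time, so on platforms
	 * with a 32-bit dma_addr_t this branch, and the always-zero
	 * upper_32_bits() computation inside it, is dead code the
	 * compiler eliminates.
	 */
	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		ret |= upper_32_bits(value);

	return ret;
}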
Signed-off-by: Franck LENORMAND Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/regs.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 0f810bc13b2b..af61f3a2c0d4 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -173,9 +173,14 @@ static inline u64 rd_reg64(void __iomem *reg) static inline u64 cpu_to_caam_dma64(dma_addr_t value) { - if (caam_imx) - return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | - (u64)cpu_to_caam32(upper_32_bits(value))); + if (caam_imx) { + u64 ret_val = (u64)cpu_to_caam32(lower_32_bits(value)) << 32; + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + ret_val |= (u64)cpu_to_caam32(upper_32_bits(value)); + + return ret_val; + } return cpu_to_caam64(value); } From d9f2d010302e858cada492c99da301858916ff28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Wed, 22 Jul 2020 15:14:53 +0300 Subject: [PATCH 147/157] crypto: caam/qi2 - fix error reporting for caam_hash_alloc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix error reporting when preparation of an hmac algorithm for registration fails: print the hmac algorithm name, not the unkeyed hash algorithm name. Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 1b0c28675906..811d34eee4f2 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -5238,7 +5238,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); dev_warn(dev, "%s hash alg allocation failed: %d\n", - alg->driver_name, err); + alg->hmac_driver_name, err); continue; } From b7ec41da3b8314d262aef7be09b5684c5f9dd43a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Wed, 22 Jul 2020 15:14:54 +0300 Subject: [PATCH 148/157] crypto: caam/qi2 - create ahash shared descriptors only once MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For keyed hash algorithms, shared descriptors are currently generated twice: -at tfm initialization time, in cra_init() callback -in setkey() callback Since it's mandatory to call setkey() for keyed algorithms, drop the generation in cra_init(). This is similar to the change in caamhash (caam/jr top-level library) commit 9a2537d0ebc9 ("crypto: caam - create ahash shared descriptors only once") Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 811d34eee4f2..1e901344db67 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -4500,7 +4500,11 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct caam_hash_state)); - return ahash_set_sh_desc(ahash); + /* + * For keyed hash algorithms shared descriptors + * will be created later in setkey() callback + */ + return alg->setkey ? 
0 : ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) From da6a66853a381864f4b040832cf11f0dbba0a097 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Wed, 22 Jul 2020 15:14:55 +0300 Subject: [PATCH 149/157] crypto: caam - silence .setkey in case of bad key length MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In case of bad key length, driver emits "key size mismatch" messages, but only for xts(aes) algorithms. Reduce verbosity by making them visible only when debugging. This way crypto fuzz testing log cleans up a bit. Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 2 +- drivers/crypto/caam/caamalg_qi.c | 2 +- drivers/crypto/caam/caamalg_qi2.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index e94e7f27f1d0..91feda5b63f6 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -832,7 +832,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, u32 *desc; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(jrdev, "key size mismatch\n"); + dev_dbg(jrdev, "key size mismatch\n"); return -EINVAL; } diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index efe8f15a4a51..bb1c0106a95c 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -728,7 +728,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, int ret = 0; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(jrdev, "key size mismatch\n"); + dev_dbg(jrdev, "key size mismatch\n"); return -EINVAL; } diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 1e901344db67..700e1d50211f 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -1058,7 +1058,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, u32 *desc; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(dev, "key size mismatch\n"); + dev_dbg(dev, "key size mismatch\n"); return -EINVAL; } From 26c4a51fdec857bc418bc6040e7b29662ce36f00 Mon Sep 17 00:00:00 2001 From: Dan Douglass Date: Wed, 22 Jul 2020 15:14:56 +0300 Subject: [PATCH 150/157] crypto: caam/jr - remove incorrect reference to caam_jr_register() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit caam_jr_register() function is no longer part of the driver since commit 6dad41158db6 ("crypto: caam - Remove unused functions from Job Ring") This patch removes a comment referencing the function. Signed-off-by: Dan Douglass Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 4af22e7ceb4f..bf6b03b17251 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -339,8 +339,7 @@ EXPORT_SYMBOL(caam_jr_free); * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's * descriptor. - * @dev: device of the job ring to be used. This device should have - * been assigned prior by caam_jr_register(). 
+ * @dev: struct device of the job ring to be used + * @desc: points to a job descriptor that execute our request. All + * descriptors (and all referenced data) must be in a DMAable + * region, and all data references must be physical addresses From ee0a6de9aa3755617e12dc5629617d27250b5846 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Wed, 22 Jul 2020 15:14:57 +0300 Subject: [PATCH 151/157] crypto: caam - add more RNG hw error codes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In some cases, e.g. when TRNG is not properly configured, the RNG module could issue a "Hardware error" at runtime. "Continuous check" error is emitted when some of the BISTs fail. Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/error.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 17c6108b6d41..72db90176b1a 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -212,6 +212,9 @@ static const char * const rng_err_id_list[] = { "Prediction resistance and test request", "Uninstantiate", "Secure key generation", + "", + "Hardware error", + "Continuous check" }; static int report_ccb_status(struct device *jrdev, const u32 status, From e4d6efef01fc4d774ccba20c09544ce08649c88b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Wed, 22 Jul 2020 15:14:58 +0300 Subject: [PATCH 152/157] crypto: caam/qi2 - add module alias MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a module alias, to enable udev-based module autoloading: $ modinfo -F alias drivers/crypto/caam/dpaa2_caam.ko fsl-mc:v00001957ddpseci Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 700e1d50211f..66ae1d581168 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -5405,6 +5405,7 @@ static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = { }, { .vendor = 0x0 } }; +MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table); static struct fsl_mc_driver dpaa2_caam_driver = { .driver = { From d86f4431bbfb8cd69b83e0c42456143290cebb12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=A8=E7=90=B0=E6=9D=B0=20=28Zhou=20Yanjie=29?= Date: Thu, 23 Jul 2020 14:24:45 +0800 Subject: [PATCH 153/157] dt-bindings: RNG: Add Ingenic RNG bindings. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the RNG bindings for the JZ4780 SoC and the X1000 SoC from Ingenic.
Signed-off-by: 周琰杰 (Zhou Yanjie) Reviewed-by: Rob Herring Signed-off-by: Herbert Xu --- .../devicetree/bindings/rng/ingenic,rng.yaml | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 Documentation/devicetree/bindings/rng/ingenic,rng.yaml diff --git a/Documentation/devicetree/bindings/rng/ingenic,rng.yaml b/Documentation/devicetree/bindings/rng/ingenic,rng.yaml new file mode 100644 index 000000000000..b2e4a6a7f93a --- /dev/null +++ b/Documentation/devicetree/bindings/rng/ingenic,rng.yaml @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rng/ingenic,rng.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Bindings for RNG in Ingenic SoCs + +maintainers: + - 周琰杰 (Zhou Yanjie) + +description: + The Random Number Generator in Ingenic SoCs. + +properties: + compatible: + enum: + - ingenic,jz4780-rng + - ingenic,x1000-rng + + reg: + maxItems: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + rng: rng@d8 { + compatible = "ingenic,jz4780-rng"; + reg = <0xd8 0x8>; + }; +... From 190873a0ea4500433ae818521cad20d37f9ee059 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=A8=E7=90=B0=E6=9D=B0=20=28Zhou=20Yanjie=29?= Date: Thu, 23 Jul 2020 14:24:46 +0800 Subject: [PATCH 154/157] crypto: ingenic - Add hardware RNG for Ingenic JZ4780 and X1000 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add JZ4780 SoC and X1000 SoC random number generator driver, based on PrasannaKumar Muralidharan's JZ4780 RNG driver. Tested-by: 周正 (Zhou Zheng) Tested-by: Mathieu Malaterre Suggested-by: Jeffrey Walton Signed-off-by: PrasannaKumar Muralidharan Signed-off-by: 周琰杰 (Zhou Yanjie) Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 15 +++ drivers/char/hw_random/Makefile | 1 + drivers/char/hw_random/ingenic-rng.c | 154 +++++++++++++++++++++++++++ 3 files changed, 170 insertions(+) create mode 100644 drivers/char/hw_random/ingenic-rng.c diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 98f95a09ce55..f976a49e1fb5 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -267,6 +267,21 @@ config HW_RANDOM_IMX_RNGC If unsure, say Y. +config HW_RANDOM_INGENIC_RNG + tristate "Ingenic Random Number Generator support" + depends on HW_RANDOM + depends on MACH_JZ4780 || MACH_X1000 + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number Generator + hardware found in ingenic JZ4780 and X1000 SoC. MIPS Creator CI20 uses + JZ4780 SoC, YSH & ATIL CU1000-Neo uses X1000 SoC. + + To compile this driver as a module, choose M here: the + module will be called ingenic-rng. + + If unsure, say Y. 
+ config HW_RANDOM_NOMADIK tristate "ST-Ericsson Nomadik Random Number Generator support" depends on ARCH_NOMADIK diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 4ed9feb094a1..26ae06844f09 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o +obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c new file mode 100644 index 000000000000..d704cef64b64 --- /dev/null +++ b/drivers/char/hw_random/ingenic-rng.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Ingenic Random Number Generator driver + * Copyright (c) 2017 PrasannaKumar Muralidharan + * Copyright (c) 2020 周琰杰 (Zhou Yanjie) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* RNG register offsets */ +#define RNG_REG_ERNG_OFFSET 0x0 +#define RNG_REG_RNG_OFFSET 0x4 + +/* bits within the ERND register */ +#define ERNG_READY BIT(31) +#define ERNG_ENABLE BIT(0) + +enum ingenic_rng_version { + ID_JZ4780, + ID_X1000, +}; + +/* Device associated memory */ +struct ingenic_rng { + enum ingenic_rng_version version; + + void __iomem *base; + struct hwrng rng; +}; + +static int ingenic_rng_init(struct hwrng *rng) +{ + struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng); + + writel(ERNG_ENABLE, priv->base + RNG_REG_ERNG_OFFSET); + + return 0; +} + +static void ingenic_rng_cleanup(struct hwrng *rng) +{ + struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng); + + writel(0, priv->base + RNG_REG_ERNG_OFFSET); +} + +static int ingenic_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng); + u32 *data = buf; + u32 status; + int ret; + + if (priv->version >= ID_X1000) { + ret = readl_poll_timeout(priv->base + RNG_REG_ERNG_OFFSET, status, + status & ERNG_READY, 10, 1000); + if (ret == -ETIMEDOUT) { + pr_err("%s: Wait for RNG data ready timeout\n", __func__); + return ret; + } + } else { + /* + * A delay is required so that the current RNG data is not bit shifted + * version of previous RNG data which could happen if random data is + * read continuously from this device. 
+ */ + udelay(20); + } + + *data = readl(priv->base + RNG_REG_RNG_OFFSET); + + return 4; +} + +static int ingenic_rng_probe(struct platform_device *pdev) +{ + struct ingenic_rng *priv; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) { + pr_err("%s: Failed to map RNG registers\n", __func__); + ret = PTR_ERR(priv->base); + goto err_free_rng; + } + + priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev); + + priv->rng.name = pdev->name; + priv->rng.init = ingenic_rng_init; + priv->rng.cleanup = ingenic_rng_cleanup; + priv->rng.read = ingenic_rng_read; + + ret = hwrng_register(&priv->rng); + if (ret) { + dev_err(&pdev->dev, "Failed to register hwrng\n"); + goto err_free_rng; + } + + platform_set_drvdata(pdev, priv); + + dev_info(&pdev->dev, "Ingenic RNG driver registered\n"); + return 0; + +err_free_rng: + kfree(priv); + return ret; +} + +static int ingenic_rng_remove(struct platform_device *pdev) +{ + struct ingenic_rng *priv = platform_get_drvdata(pdev); + + hwrng_unregister(&priv->rng); + + writel(0, priv->base + RNG_REG_ERNG_OFFSET); + + return 0; +} + +static const struct of_device_id ingenic_rng_of_match[] = { + { .compatible = "ingenic,jz4780-rng", .data = (void *) ID_JZ4780 }, + { .compatible = "ingenic,x1000-rng", .data = (void *) ID_X1000 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ingenic_rng_of_match); + +static struct platform_driver ingenic_rng_driver = { + .probe = ingenic_rng_probe, + .remove = ingenic_rng_remove, + .driver = { + .name = "ingenic-rng", + .of_match_table = ingenic_rng_of_match, + }, +}; + +module_platform_driver(ingenic_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("PrasannaKumar Muralidharan "); +MODULE_AUTHOR("周琰杰 (Zhou Yanjie) "); +MODULE_DESCRIPTION("Ingenic Random Number Generator driver"); From 054a5540fb8f7268e2c79e9deab4242db15c8cba Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 23 Jul 2020 17:50:48 +1000 Subject: [PATCH 155/157] crypto: x86/curve25519 - Remove unused carry variables The carry variables are assigned but never used, which upsets the compiler. This patch removes them. Signed-off-by: Herbert Xu Reviewed-by: Karthikeyan Bhargavan Acked-by: Jason A. 
Donenfeld Signed-off-by: Herbert Xu --- arch/x86/crypto/curve25519-x86_64.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c index 8a17621f7d3a..8acbb6584a37 100644 --- a/arch/x86/crypto/curve25519-x86_64.c +++ b/arch/x86/crypto/curve25519-x86_64.c @@ -948,10 +948,8 @@ static void store_felem(u64 *b, u64 *f) { u64 f30 = f[3U]; u64 top_bit0 = f30 >> (u32)63U; - u64 carry0; u64 f31; u64 top_bit; - u64 carry; u64 f0; u64 f1; u64 f2; @@ -970,11 +968,11 @@ static void store_felem(u64 *b, u64 *f) u64 o2; u64 o3; f[3U] = f30 & (u64)0x7fffffffffffffffU; - carry0 = add_scalar(f, f, (u64)19U * top_bit0); + add_scalar(f, f, (u64)19U * top_bit0); f31 = f[3U]; top_bit = f31 >> (u32)63U; f[3U] = f31 & (u64)0x7fffffffffffffffU; - carry = add_scalar(f, f, (u64)19U * top_bit); + add_scalar(f, f, (u64)19U * top_bit); f0 = f[0U]; f1 = f[1U]; f2 = f[2U]; From 45645709704f94d2625ef15fbf7bcae51aeab975 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 24 Jul 2020 11:10:01 +0100 Subject: [PATCH 156/157] hwrng: core - remove redundant initialization of variable ret The variable ret is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/char/hw_random/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index d2d7a42d7e0d..8c1c47dd9f46 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(devm_hwrng_unregister); static int __init hwrng_modinit(void) { - int ret = -ENOMEM; + int ret; /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); From 3cbfe80737c18ac6e635421ab676716a393d3074 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 24 Jul 2020 11:50:27 -0500 Subject: [PATCH 157/157] crypto: sa2ul - Fix inconsistent IS_ERR and PTR_ERR Fix inconsistent IS_ERR and PTR_ERR in sa_dma_init(). The proper pointer to be passed as argument to PTR_ERR() is dd->dma_tx. This bug was detected with the help of Coccinelle. Fixes: 7694b6ca649f ("crypto: sa2ul - Add crypto driver") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Herbert Xu --- drivers/crypto/sa2ul.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index ebcdffcdb686..5bc099052bd2 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -2259,7 +2259,7 @@ static int sa_dma_init(struct sa_crypto_data *dd) dd->dma_tx = dma_request_chan(dd->dev, "tx"); if (IS_ERR(dd->dma_tx)) { - if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER) + if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER) dev_err(dd->dev, "Unable to request tx DMA channel\n"); ret = PTR_ERR(dd->dma_tx); goto err_dma_tx;
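For reference, the corrected error path above follows the usual dmaengine pattern: the pointer tested with IS_ERR() and the pointer decoded with PTR_ERR() are always the same, and probe deferral stays quiet. A condensed sketch of that pattern (hypothetical helper, not the sa2ul code):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>

static int request_named_chan(struct device *dev, const char *name,
			      struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, name);
	if (IS_ERR(*chan)) {
		/* PTR_ERR() must decode the same pointer IS_ERR() tested */
		int ret = PTR_ERR(*chan);

		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Unable to request %s DMA channel\n",
				name);
		*chan = NULL;
		return ret;
	}

	return 0;
}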