mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 14:11:52 +00:00
net/tls: Fix authentication failure in CCM mode
When the TLS cipher suite uses CCM mode, including AES CCM and
SM4 CCM, the first byte of the B0 block is flags, and the real
IV starts from the second byte. The XOR operation of the IV and
rec_seq should skip this byte — that is, it should add iv_offset.
Fixes: f295b3ae9f ("net/tls: Add support of AES128-CCM based ciphers")
Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Cc: Vakul Garg <vakul.garg@nxp.com>
Cc: stable@vger.kernel.org # v5.2+
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
ef56b64001
commit
5961060692
@ -521,7 +521,7 @@ static int tls_do_encryption(struct sock *sk,
|
||||
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
|
||||
prot->iv_size + prot->salt_size);
|
||||
|
||||
xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);
|
||||
xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
|
||||
|
||||
sge->offset += prot->prepend_size;
|
||||
sge->length -= prot->prepend_size;
|
||||
@ -1499,7 +1499,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
|
||||
else
|
||||
memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
|
||||
|
||||
xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
|
||||
xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
|
||||
|
||||
/* Prepare AAD */
|
||||
tls_make_aad(aad, rxm->full_len - prot->overhead_size +
|
||||
|
Loading…
Reference in New Issue
Block a user