tls: fix race between async notify and socket close
The submitting thread (the one that called recvmsg/sendmsg)
may exit as soon as the async crypto handler calls complete(),
so any code past that point risks touching already-freed data.
Try to avoid the locking and extra flags altogether.
Have the main thread hold an extra reference; this way
we can depend solely on the atomic ref counter for
synchronization.
Don't futz with reiniting the completion, either; we now
tightly control when the completion fires.
Reported-by: valis <sec@valis.email>
Fixes: 0cada33241 ("net/tls: fix race condition causing kernel panic")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit aec7961916
parent c57ca512f3
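The scheme is easiest to see outside the kernel. Below is a minimal
userspace C sketch of the same biased-counter idea (hypothetical names
throughout; pending, submit_one() and async_wait() are illustrative,
not from the patch): the counter is seeded to 1 because the waiting
thread permanently holds one reference, every in-flight request holds
another, and the completion fires only when the count reaches zero,
i.e. when the last request finishes after the waiter has already
dropped its own reference.

/*
 * Minimal userspace sketch (not kernel code) of the biased-refcount
 * completion pattern the patch adopts; all names are illustrative.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int pending = 1;  /* 1 = the waiter's permanent bias ref */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;                /* stands in for struct completion */

/* Async handler: drop one reference; the last one fires the completion,
 * mirroring the new tls_decrypt_done()/tls_encrypt_done(). */
static void *crypto_done(void *arg)
{
	usleep(1000);           /* pretend the crypto took a while */
	if (atomic_fetch_sub(&pending, 1) == 1) {  /* ~ atomic_dec_and_test() */
		pthread_mutex_lock(&lock);
		done = 1;
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

/* Submission: take one reference per in-flight operation. */
static pthread_t submit_one(void)
{
	pthread_t t;

	atomic_fetch_add(&pending, 1);
	pthread_create(&t, NULL, crypto_done, NULL);
	return t;
}

/* Wait: drop the bias reference; sleep only if requests are still in
 * flight, then restore the bias so the context can be reused. */
static void async_wait(void)
{
	if (atomic_fetch_sub(&pending, 1) != 1) {
		pthread_mutex_lock(&lock);
		while (!done)
			pthread_cond_wait(&cond, &lock);
		done = 0;
		pthread_mutex_unlock(&lock);
	}
	atomic_fetch_add(&pending, 1);  /* re-bias for the next waiter */
}

int main(void)
{
	pthread_t t1 = submit_one();
	pthread_t t2 = submit_one();

	async_wait();
	printf("all ops done, pending back to %d\n", atomic_load(&pending));
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

atomic_fetch_sub(&x, 1) == 1 is the C11 spelling of the kernel's
atomic_dec_and_test(). Because the waiter re-increments after waking,
the counter ends back at its biased value of 1, which is exactly why
the patch can also drop reinit_completion() and the async_notify flag.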
@@ -97,9 +97,6 @@ struct tls_sw_context_tx {
 	struct tls_rec *open_rec;
 	struct list_head tx_list;
 	atomic_t encrypt_pending;
-	/* protect crypto_wait with encrypt_pending */
-	spinlock_t encrypt_compl_lock;
-	int async_notify;
 	u8 async_capable:1;
 
 #define BIT_TX_SCHEDULED	0
@@ -136,8 +133,6 @@ struct tls_sw_context_rx {
 	struct tls_strparser strp;
 
 	atomic_t decrypt_pending;
-	/* protect crypto_wait with decrypt_pending*/
-	spinlock_t decrypt_compl_lock;
 	struct sk_buff_head async_hold;
 	struct wait_queue_head wq;
 };
@@ -224,22 +224,15 @@ static void tls_decrypt_done(void *data, int err)
 
 	kfree(aead_req);
 
-	spin_lock_bh(&ctx->decrypt_compl_lock);
-	if (!atomic_dec_return(&ctx->decrypt_pending))
+	if (atomic_dec_and_test(&ctx->decrypt_pending))
 		complete(&ctx->async_wait.completion);
-	spin_unlock_bh(&ctx->decrypt_compl_lock);
 }
 
 static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
 {
-	int pending;
-
-	spin_lock_bh(&ctx->decrypt_compl_lock);
-	reinit_completion(&ctx->async_wait.completion);
-	pending = atomic_read(&ctx->decrypt_pending);
-	spin_unlock_bh(&ctx->decrypt_compl_lock);
-	if (pending)
+	if (!atomic_dec_and_test(&ctx->decrypt_pending))
 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+	atomic_inc(&ctx->decrypt_pending);
 
 	return ctx->async_wait.err;
 }
@@ -267,6 +260,7 @@ static int tls_do_decryption(struct sock *sk,
 		aead_request_set_callback(aead_req,
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
 					  tls_decrypt_done, aead_req);
+		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
 		atomic_inc(&ctx->decrypt_pending);
 	} else {
 		aead_request_set_callback(aead_req,
@@ -455,7 +449,6 @@ static void tls_encrypt_done(void *data, int err)
 	struct sk_msg *msg_en;
 	bool ready = false;
 	struct sock *sk;
-	int pending;
 
 	msg_en = &rec->msg_encrypted;
 
@@ -494,12 +487,8 @@ static void tls_encrypt_done(void *data, int err)
 		ready = true;
 	}
 
-	spin_lock_bh(&ctx->encrypt_compl_lock);
-	pending = atomic_dec_return(&ctx->encrypt_pending);
-
-	if (!pending && ctx->async_notify)
+	if (atomic_dec_and_test(&ctx->encrypt_pending))
 		complete(&ctx->async_wait.completion);
-	spin_unlock_bh(&ctx->encrypt_compl_lock);
 
 	if (!ready)
 		return;
@@ -511,22 +500,9 @@ static void tls_encrypt_done(void *data, int err)
 
 static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
 {
-	int pending;
-
-	spin_lock_bh(&ctx->encrypt_compl_lock);
-	ctx->async_notify = true;
-
-	pending = atomic_read(&ctx->encrypt_pending);
-	spin_unlock_bh(&ctx->encrypt_compl_lock);
-	if (pending)
+	if (!atomic_dec_and_test(&ctx->encrypt_pending))
 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-	else
-		reinit_completion(&ctx->async_wait.completion);
-
-	/* There can be no concurrent accesses, since we have no
-	 * pending encrypt operations
-	 */
-	WRITE_ONCE(ctx->async_notify, false);
+	atomic_inc(&ctx->encrypt_pending);
 
 	return ctx->async_wait.err;
 }
@@ -577,6 +553,7 @@ static int tls_do_encryption(struct sock *sk,
 
 	/* Add the record in tx_list */
 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
+	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
 	atomic_inc(&ctx->encrypt_pending);
 
 	rc = crypto_aead_encrypt(aead_req);
@@ -2601,7 +2578,7 @@ static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct soc
 	}
 
 	crypto_init_wait(&sw_ctx_tx->async_wait);
-	spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
+	atomic_set(&sw_ctx_tx->encrypt_pending, 1);
 	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
 	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
 	sw_ctx_tx->tx_work.sk = sk;
@@ -2622,7 +2599,7 @@ static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
 	}
 
 	crypto_init_wait(&sw_ctx_rx->async_wait);
-	spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+	atomic_set(&sw_ctx_rx->decrypt_pending, 1);
 	init_waitqueue_head(&sw_ctx_rx->wq);
 	skb_queue_head_init(&sw_ctx_rx->rx_list);
 	skb_queue_head_init(&sw_ctx_rx->async_hold);
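One more detail worth calling out: the two DEBUG_NET_WARN_ON_ONCE()
additions assert that the counter is at least 1 before a submission
takes its own reference. If the bias reference were ever missing at
that point, a concurrent completion could drop the count to zero and
fire while the submitting thread is still touching the context, which
is the race this patch closes. A compilable sketch of that invariant,
reusing the hypothetical userspace names from the sketch above:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending = 1;  /* seeded like atomic_set(..., 1) above */

/* Take a per-operation reference. The waiter's bias reference must
 * still be present here; asserting that is the userspace analogue of
 * DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->..._pending) < 1). */
static void submit_ref(void)
{
	assert(atomic_load(&pending) >= 1);
	atomic_fetch_add(&pending, 1);
}

int main(void)
{
	submit_ref();
	printf("pending after one submission: %d\n", atomic_load(&pending));
	return 0;
}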