net/tls: remove close callback sock unlock/lock around TX work flush
The tls close() callback currently drops the sock lock, makes a cancel_delayed_work_sync() call, and then relocks the sock. By restructuring the code we can avoid dropping the lock and then reclaiming it. To simplify this we do the following,

 tls_sk_proto_close
   set_bit(CLOSING)
   set_bit(SCHEDULE)
   cancel_delayed_work_sync()  <- cancel workqueue
   lock_sock(sk)
   ...
   release_sock(sk)
   strp_done()

Setting the CLOSING bit prevents the SCHEDULE bit from being cleared by any workqueue items, e.g. if one happens to be scheduled and run between when we set the SCHEDULE bit and cancel the work. Then, because the SCHEDULE bit is set, no new work will be scheduled.

Tested with net selftests and bpf selftests.

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:
parent ac78fc148d
commit f87e62d45e
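
For illustration only: below is a minimal user-space C sketch of the two-bit pattern the commit message describes, not the kernel implementation. The names (BIT_SCHEDULED, BIT_CLOSING, worker) are made up for the example, and pthread_join() stands in for cancel_delayed_work_sync(); the point is that setting the CLOSING bit before cancelling keeps the worker from clearing the SCHEDULED bit, so later "schedule only if not already scheduled" checks refuse to queue new work.

/* Illustrative sketch only -- not the kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define BIT_SCHEDULED 0x1u
#define BIT_CLOSING   0x2u

static atomic_uint bitmask;

/* Stand-in for tx_work_handler(): bail out without clearing
 * SCHEDULED when a close is in progress.
 */
static void *worker(void *arg)
{
	(void)arg;
	if (atomic_load(&bitmask) & BIT_CLOSING)
		return NULL;
	if (!(atomic_fetch_and(&bitmask, ~BIT_SCHEDULED) & BIT_SCHEDULED))
		return NULL;
	printf("worker: flushing pending records\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* Queue one unit of work. */
	atomic_fetch_or(&bitmask, BIT_SCHEDULED);
	pthread_create(&t, NULL, worker, NULL);

	/* Close path, mirroring the ordering in the commit message:
	 * set CLOSING, set SCHEDULED, then wait for the worker
	 * (pthread_join() standing in for cancel_delayed_work_sync()).
	 */
	atomic_fetch_or(&bitmask, BIT_CLOSING | BIT_SCHEDULED);
	pthread_join(t, NULL);

	/* SCHEDULED is still set, so a "schedule only if not already
	 * scheduled" check would refuse to queue new work from here on.
	 */
	printf("bitmask after close: 0x%x\n", atomic_load(&bitmask));
	return 0;
}

In the diff below, tls_sw_cancel_work_tx() performs exactly this sequence (two set_bit() calls followed by cancel_delayed_work_sync()), and tx_work_handler() tests BIT_TX_CLOSING before touching BIT_TX_SCHEDULED.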
@@ -162,6 +162,7 @@ struct tls_sw_context_tx {
 	int async_capable;
 
 #define BIT_TX_SCHEDULED	0
+#define BIT_TX_CLOSING		1
 	unsigned long tx_bitmask;
 };
@@ -360,6 +361,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_sw_sendpage(struct sock *sk, struct page *page,
 		    int offset, size_t size, int flags);
 void tls_sw_close(struct sock *sk, long timeout);
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
 void tls_sw_free_resources_tx(struct sock *sk);
 void tls_sw_free_resources_rx(struct sock *sk);
 void tls_sw_release_resources_rx(struct sock *sk);
@@ -268,6 +268,9 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 	void (*sk_proto_close)(struct sock *sk, long timeout);
 	bool free_ctx = false;
 
+	if (ctx->tx_conf == TLS_SW)
+		tls_sw_cancel_work_tx(ctx);
+
 	lock_sock(sk);
 	sk_proto_close = ctx->sk_proto_close;
@@ -2054,6 +2054,15 @@ static void tls_data_ready(struct sock *sk)
 	}
 }
 
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
+{
+	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+
+	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
+	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
+	cancel_delayed_work_sync(&ctx->tx_work.work);
+}
+
 void tls_sw_free_resources_tx(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -2065,11 +2074,6 @@ void tls_sw_free_resources_tx(struct sock *sk)
 	if (atomic_read(&ctx->encrypt_pending))
 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
 
-	release_sock(sk);
-	cancel_delayed_work_sync(&ctx->tx_work.work);
-	lock_sock(sk);
-
 	/* Tx whatever records we can transmit and abandon the rest */
 	tls_tx_records(sk, -1);
 
 	/* Free up un-sent records in tx_list. First, free
@@ -2137,11 +2141,17 @@ static void tx_work_handler(struct work_struct *work)
 					       struct tx_work, work);
 	struct sock *sk = tx_work->sk;
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+	struct tls_sw_context_tx *ctx;
+
+	if (unlikely(!tls_ctx))
+		return;
+
+	ctx = tls_sw_ctx_tx(tls_ctx);
+	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
+		return;
 
 	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
 		return;
+
 	lock_sock(sk);
 	tls_tx_records(sk, -1);
 	release_sock(sk);