net: annotate lockless accesses to sk->sk_ack_backlog
sk->sk_ack_backlog can be read without any lock being held.
We need to use READ_ONCE()/WRITE_ONCE() to avoid load/store
tearing and/or potential KCSAN warnings.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 288efe8606
parent 7976a11b30
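The pattern applied throughout is the standard one for a field that is written under the socket lock but read locklessly. A minimal sketch of the idea, not part of this patch (the example_* names are illustrative):

/* Sketch only: example_sock stands in for struct sock, and backlog for
 * sk->sk_ack_backlog. The writer runs under the listener's lock while
 * readers (procfs, diag, ...) may run with no lock held, so both sides
 * are annotated: WRITE_ONCE() prevents store tearing, READ_ONCE()
 * prevents load tearing, and both tell KCSAN the race is intentional.
 */
#include <linux/compiler.h>
#include <linux/types.h>

struct example_sock {
	u32 backlog;
};

static void example_acceptq_added(struct example_sock *es)
{
	WRITE_ONCE(es->backlog, es->backlog + 1);	/* single, untorn store */
}

static u32 example_backlog_peek(const struct example_sock *es)
{
	return READ_ONCE(es->backlog);			/* single, untorn load */
}

A plain ++ or -- on the writer side compiles to a load-modify-store that the compiler is free to tear or split; annotating both sides keeps every access a single full-width load or store.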
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -859,17 +859,17 @@ static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
 
 static inline void sk_acceptq_removed(struct sock *sk)
 {
-	sk->sk_ack_backlog--;
+	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
 }
 
 static inline void sk_acceptq_added(struct sock *sk)
 {
-	sk->sk_ack_backlog++;
+	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
 }
 
 static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
-	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+	return READ_ONCE(sk->sk_ack_backlog) > sk->sk_max_ack_backlog;
 }
 
 /*
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3225,7 +3225,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 		 * tcpi_unacked -> Number of children ready for accept()
 		 * tcpi_sacked -> max backlog
 		 */
-		info->tcpi_unacked = sk->sk_ack_backlog;
+		info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
 		info->tcpi_sacked = sk->sk_max_ack_backlog;
 		return;
 	}
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -21,7 +21,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 	struct tcp_info *info = _info;
 
 	if (inet_sk_state_load(sk) == TCP_LISTEN) {
-		r->idiag_rqueue = sk->sk_ack_backlog;
+		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
 		r->idiag_wqueue = sk->sk_max_ack_backlog;
 	} else if (sk->sk_type == SOCK_STREAM) {
 		const struct tcp_sock *tp = tcp_sk(sk);
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2451,7 +2451,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 
 	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
-		rx_queue = sk->sk_ack_backlog;
+		rx_queue = READ_ONCE(sk->sk_ack_backlog);
 	else
 		/* Because we don't lock the socket,
 		 * we might find a transient negative value.
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1891,7 +1891,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 
 	state = inet_sk_state_load(sp);
 	if (state == TCP_LISTEN)
-		rx_queue = sp->sk_ack_backlog;
+		rx_queue = READ_ONCE(sp->sk_ack_backlog);
 	else
 		/* Because we don't lock the socket,
 		 * we might find a transient negative value.
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -521,7 +521,7 @@ META_COLLECTOR(int_sk_ack_bl)
 		*err = -1;
 		return;
 	}
-	dst->value = sk->sk_ack_backlog;
+	dst->value = READ_ONCE(sk->sk_ack_backlog);
 }
 
 META_COLLECTOR(int_sk_max_ack_bl)
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -425,7 +425,7 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 		r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
 		r->idiag_wqueue = infox->asoc->sndbuf_used;
 	} else {
-		r->idiag_rqueue = sk->sk_ack_backlog;
+		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
 		r->idiag_wqueue = sk->sk_max_ack_backlog;
 	}
 	if (infox->sctpinfo)