sock: deduplicate errqueue dequeue

sk->sk_error_queue is dequeued in four locations. All share the
exact same logic. Deduplicate.

Also collapse the two critical sections for dequeue (at the top of
the recv handler) and signal (at the bottom).

This moves signal generation for the next packet forward, which should
be harmless.

It also changes the behavior if the recv handler exits early with an
error. Previously, a signal for follow-up packets on the errqueue
would then not be scheduled. The new behavior, to always signal, is
arguably a bug fix.

For rxrpc, the change causes the same function to be called repeatedly
for each queued packet (because the recv handler == sk_error_report).
It is likely that all packets will fail for the same reason (e.g.,
memory exhaustion).

This code runs without sk_lock held, so it is not safe to assume that
sk->sk_err is immutable between releasing q->lock and the subsequent
test. Introduce a local int err just to avoid this potential race.

Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Willem de Bruijn 2014-08-31 21:30:27 -04:00 committed by David S. Miller
parent 8fe2f761ca
commit 364a9e9324
6 changed files with 28 additions and 51 deletions

View File

@ -2041,6 +2041,7 @@ void sk_stop_timer(struct sock *sk, struct timer_list *timer);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
/* /*
* Recover an error report and clear atomically * Recover an error report and clear atomically

View File

@ -3491,6 +3491,26 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
} }
EXPORT_SYMBOL(sock_queue_err_skb); EXPORT_SYMBOL(sock_queue_err_skb);
/*
 * Pop the head of sk's error queue and propagate the error state.
 *
 * Under the queue lock, remove the first skb and peek at the one that
 * follows it; the follower's ee_errno (or 0 if the queue is now empty)
 * becomes the socket's pending error.  The errno is captured in a local
 * while the lock is held because sk_err may be written concurrently
 * once the lock is dropped.  A non-zero pending error re-signals the
 * socket so the owner knows more errqueue data is waiting.
 *
 * Returns the dequeued skb, or NULL if the queue was empty.  The caller
 * owns the returned skb and must free it.
 */
struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
{
	struct sk_buff_head *q = &sk->sk_error_queue;
	struct sk_buff *head, *next;
	int next_err = 0;

	spin_lock_bh(&q->lock);
	head = __skb_dequeue(q);
	if (head) {
		next = skb_peek(q);
		if (next)
			next_err = SKB_EXT_ERR(next)->ee.ee_errno;
	}
	spin_unlock_bh(&q->lock);

	sk->sk_err = next_err;
	if (next_err)
		sk->sk_error_report(sk);

	return head;
}
EXPORT_SYMBOL(sock_dequeue_err_skb);
void __skb_tstamp_tx(struct sk_buff *orig_skb, void __skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps, struct skb_shared_hwtstamps *hwtstamps,
struct sock *sk, int tstype) struct sock *sk, int tstype)

View File

@ -2488,11 +2488,11 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
int level, int type) int level, int type)
{ {
struct sock_exterr_skb *serr; struct sock_exterr_skb *serr;
struct sk_buff *skb, *skb2; struct sk_buff *skb;
int copied, err; int copied, err;
err = -EAGAIN; err = -EAGAIN;
skb = skb_dequeue(&sk->sk_error_queue); skb = sock_dequeue_err_skb(sk);
if (skb == NULL) if (skb == NULL)
goto out; goto out;
@ -2513,16 +2513,6 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
msg->msg_flags |= MSG_ERRQUEUE; msg->msg_flags |= MSG_ERRQUEUE;
err = copied; err = copied;
/* Reset and regenerate socket error */
spin_lock_bh(&sk->sk_error_queue.lock);
sk->sk_err = 0;
if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
spin_unlock_bh(&sk->sk_error_queue.lock);
sk->sk_error_report(sk);
} else
spin_unlock_bh(&sk->sk_error_queue.lock);
out_free_skb: out_free_skb:
kfree_skb(skb); kfree_skb(skb);
out: out:

View File

@ -405,7 +405,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{ {
struct sock_exterr_skb *serr; struct sock_exterr_skb *serr;
struct sk_buff *skb, *skb2; struct sk_buff *skb;
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct { struct {
struct sock_extended_err ee; struct sock_extended_err ee;
@ -415,7 +415,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
int copied; int copied;
err = -EAGAIN; err = -EAGAIN;
skb = skb_dequeue(&sk->sk_error_queue); skb = sock_dequeue_err_skb(sk);
if (skb == NULL) if (skb == NULL)
goto out; goto out;
@ -462,17 +462,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
msg->msg_flags |= MSG_ERRQUEUE; msg->msg_flags |= MSG_ERRQUEUE;
err = copied; err = copied;
/* Reset and regenerate socket error */
spin_lock_bh(&sk->sk_error_queue.lock);
sk->sk_err = 0;
skb2 = skb_peek(&sk->sk_error_queue);
if (skb2 != NULL) {
sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
spin_unlock_bh(&sk->sk_error_queue.lock);
sk->sk_error_report(sk);
} else
spin_unlock_bh(&sk->sk_error_queue.lock);
out_free_skb: out_free_skb:
kfree_skb(skb); kfree_skb(skb);
out: out:

View File

@ -332,7 +332,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{ {
struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk);
struct sock_exterr_skb *serr; struct sock_exterr_skb *serr;
struct sk_buff *skb, *skb2; struct sk_buff *skb;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name); DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name);
struct { struct {
struct sock_extended_err ee; struct sock_extended_err ee;
@ -342,7 +342,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
int copied; int copied;
err = -EAGAIN; err = -EAGAIN;
skb = skb_dequeue(&sk->sk_error_queue); skb = sock_dequeue_err_skb(sk);
if (skb == NULL) if (skb == NULL)
goto out; goto out;
@ -415,17 +415,6 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
msg->msg_flags |= MSG_ERRQUEUE; msg->msg_flags |= MSG_ERRQUEUE;
err = copied; err = copied;
/* Reset and regenerate socket error */
spin_lock_bh(&sk->sk_error_queue.lock);
sk->sk_err = 0;
if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
spin_unlock_bh(&sk->sk_error_queue.lock);
sk->sk_error_report(sk);
} else {
spin_unlock_bh(&sk->sk_error_queue.lock);
}
out_free_skb: out_free_skb:
kfree_skb(skb); kfree_skb(skb);
out: out:

View File

@ -37,7 +37,7 @@ void rxrpc_UDP_error_report(struct sock *sk)
_enter("%p{%d}", sk, local->debug_id); _enter("%p{%d}", sk, local->debug_id);
skb = skb_dequeue(&sk->sk_error_queue); skb = sock_dequeue_err_skb(sk);
if (!skb) { if (!skb) {
_leave("UDP socket errqueue empty"); _leave("UDP socket errqueue empty");
return; return;
@ -111,18 +111,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
skb_queue_tail(&trans->error_queue, skb); skb_queue_tail(&trans->error_queue, skb);
rxrpc_queue_work(&trans->error_handler); rxrpc_queue_work(&trans->error_handler);
/* reset and regenerate socket error */
spin_lock_bh(&sk->sk_error_queue.lock);
sk->sk_err = 0;
skb = skb_peek(&sk->sk_error_queue);
if (skb) {
sk->sk_err = SKB_EXT_ERR(skb)->ee.ee_errno;
spin_unlock_bh(&sk->sk_error_queue.lock);
sk->sk_error_report(sk);
} else {
spin_unlock_bh(&sk->sk_error_queue.lock);
}
_leave(""); _leave("");
} }