[DCCP]: Implement the CLOSING timer

So that we retransmit CLOSE/CLOSEREQ packets until they elicit an
answer or we hit a timeout.

Most of the machinery uses TCP approaches; this code still has to be
polished and audited, but it is better than what we had before.

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Arnaldo Carvalho de Melo 2005-08-23 21:50:06 -07:00 committed by David S. Miller
parent 58e45131dc
commit 7ad07e7cf3
4 changed files with 47 additions and 32 deletions

View File

@ -255,7 +255,7 @@ extern int dccp_v4_checksum(const struct sk_buff *skb,
extern int dccp_v4_send_reset(struct sock *sk, extern int dccp_v4_send_reset(struct sock *sk,
enum dccp_reset_codes code); enum dccp_reset_codes code);
extern void dccp_send_close(struct sock *sk); extern void dccp_send_close(struct sock *sk, const int active);
struct dccp_skb_cb { struct dccp_skb_cb {
__u8 dccpd_type; __u8 dccpd_type;

View File

@ -31,14 +31,9 @@ static void dccp_fin(struct sock *sk, struct sk_buff *skb)
static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb) static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{ {
switch (sk->sk_state) { dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
case DCCP_PARTOPEN: dccp_fin(sk, skb);
case DCCP_OPEN: dccp_set_state(sk, DCCP_CLOSED);
dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
dccp_fin(sk, skb);
dccp_set_state(sk, DCCP_CLOSED);
break;
}
} }
static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
@ -54,13 +49,8 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
return; return;
} }
switch (sk->sk_state) { dccp_set_state(sk, DCCP_CLOSING);
case DCCP_PARTOPEN: dccp_send_close(sk, 0);
case DCCP_OPEN:
dccp_set_state(sk, DCCP_CLOSING);
dccp_send_close(sk);
break;
}
} }
static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb) static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
@ -562,6 +552,12 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_PKT_SYNC); DCCP_PKT_SYNC);
goto discard; goto discard;
} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
dccp_rcv_closereq(sk, skb);
goto discard;
} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
dccp_rcv_close(sk, skb);
return 0;
} }
switch (sk->sk_state) { switch (sk->sk_state) {

View File

@ -96,8 +96,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr, dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
inet->daddr); inet->daddr);
if (dcb->dccpd_type == DCCP_PKT_ACK || if (set_ack)
dcb->dccpd_type == DCCP_PKT_DATAACK)
dccp_event_ack_sent(sk); dccp_event_ack_sent(sk);
DCCP_INC_STATS(DCCP_MIB_OUTSEGS); DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
@ -429,18 +428,15 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
* cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
* any circumstances. * any circumstances.
*/ */
void dccp_send_close(struct sock *sk) void dccp_send_close(struct sock *sk, const int active)
{ {
struct dccp_sock *dp = dccp_sk(sk); struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb; struct sk_buff *skb;
const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;
/* Socket is locked, keep trying until memory is available. */ skb = alloc_skb(sk->sk_prot->max_header, prio);
for (;;) { if (skb == NULL)
skb = alloc_skb(sk->sk_prot->max_header, GFP_KERNEL); return;
if (skb != NULL)
break;
yield();
}
/* Reserve space for headers and prepare control bits. */ /* Reserve space for headers and prepare control bits. */
skb_reserve(skb, sk->sk_prot->max_header); skb_reserve(skb, sk->sk_prot->max_header);
@ -449,7 +445,12 @@ void dccp_send_close(struct sock *sk)
DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ; DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
skb_set_owner_w(skb, sk); skb_set_owner_w(skb, sk);
dccp_transmit_skb(sk, skb); if (active) {
BUG_TRAP(sk->sk_send_head == NULL);
sk->sk_send_head = skb;
dccp_transmit_skb(sk, skb_clone(skb, prio));
} else
dccp_transmit_skb(sk, skb);
ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk); ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk); ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);

View File

@ -402,12 +402,15 @@ void dccp_close(struct sock *sk, long timeout)
/* Check zero linger _after_ checking for unread data. */ /* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0); sk->sk_prot->disconnect(sk, 0);
} else if (dccp_close_state(sk)) { } else if (dccp_close_state(sk)) {
dccp_send_close(sk); dccp_send_close(sk, 1);
} }
sk_stream_wait_close(sk, timeout); sk_stream_wait_close(sk, timeout);
adjudge_to_death: adjudge_to_death:
/*
* It is the last release_sock in its life. It will remove backlog.
*/
release_sock(sk); release_sock(sk);
/* /*
* Now socket is owned by kernel and we acquire BH lock * Now socket is owned by kernel and we acquire BH lock
@ -419,11 +422,26 @@ adjudge_to_death:
sock_hold(sk); sock_hold(sk);
sock_orphan(sk); sock_orphan(sk);
if (sk->sk_state != DCCP_CLOSED)
dccp_set_state(sk, DCCP_CLOSED);
atomic_inc(&dccp_orphan_count); /*
* The last release_sock may have processed the CLOSE or RESET
* packet moving sock to CLOSED state, if not we have to fire
* the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
* in draft-ietf-dccp-spec-11. -acme
*/
if (sk->sk_state == DCCP_CLOSING) {
/* FIXME: should start at 2 * RTT */
/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
DCCP_RTO_MAX);
#if 0
/* Yeah, we should use sk->sk_prot->orphan_count, etc */
dccp_set_state(sk, DCCP_CLOSED);
#endif
}
atomic_inc(sk->sk_prot->orphan_count);
if (sk->sk_state == DCCP_CLOSED) if (sk->sk_state == DCCP_CLOSED)
inet_csk_destroy_sock(sk); inet_csk_destroy_sock(sk);