dccp ccid-2: Stop polling

This updates CCID2 to use the CCID dequeuing mechanism, converting the
previous constant polling into an event-driven mechanism.

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Author: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Date:   2008-09-04 07:30:19 +02:00
parent 146993cf51
commit 83337dae6c

2 changed files with 18 additions and 8 deletions
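The mechanism has two halves: ccid2_hc_tx_send_packet() now returns a verdict
(CCID_PACKET_SEND_AT_ONCE or CCID_PACKET_WILL_DEQUEUE_LATER) instead of being
polled, and whenever it answered "dequeue later", the CCID must itself wake the
TX queue, by scheduling dccps_xmitlet, once the window opens again. The toy
user-space program below is a sketch of that pattern, not kernel code: the
function names and the pipe >= cwnd predicate mirror this patch, but the
constant values and write_xmit() are illustrative stand-ins for DCCP's generic
dequeueing loop.

/* Toy user-space model of the event-driven dequeueing pattern in this
 * patch -- illustrative only, not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define CCID_PACKET_SEND_AT_ONCE	0	/* illustrative values */
#define CCID_PACKET_WILL_DEQUEUE_LATER	1

struct tx_state {
	unsigned int pipe;	/* packets in flight */
	unsigned int cwnd;	/* congestion window */
	unsigned int queued;	/* packets waiting in the TX queue */
};

static bool cwnd_network_limited(const struct tx_state *tx)
{
	return tx->pipe >= tx->cwnd;	/* same predicate as ccid2_cwnd_network_limited() */
}

/* Analogue of ccid2_hc_tx_send_packet(): a pure verdict, no polling. */
static int tx_send_packet(const struct tx_state *tx)
{
	if (cwnd_network_limited(tx))
		return CCID_PACKET_WILL_DEQUEUE_LATER;
	return CCID_PACKET_SEND_AT_ONCE;
}

/* Stand-in for DCCP's generic write-queue drain. */
static void write_xmit(struct tx_state *tx)
{
	while (tx->queued && tx_send_packet(tx) == CCID_PACKET_SEND_AT_ONCE) {
		tx->queued--;
		tx->pipe++;
		printf("sent packet, pipe=%u/%u\n", tx->pipe, tx->cwnd);
	}
	if (tx->queued)
		printf("blocked: %u packet(s) wait for the next Ack\n", tx->queued);
}

/* Analogue of ccid2_hc_tx_packet_recv(): an Ack shrinks 'pipe'; if the
 * sender was blocked and no longer is, re-kick the TX queue (the patch
 * does this via tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet)).
 */
static void packet_recv_ack(struct tx_state *tx)
{
	const bool sender_was_blocked = cwnd_network_limited(tx);

	if (tx->pipe)
		tx->pipe--;
	if (sender_was_blocked && !cwnd_network_limited(tx))
		write_xmit(tx);		/* stands in for tasklet_schedule() */
}

int main(void)
{
	struct tx_state tx = { .pipe = 0, .cwnd = 2, .queued = 4 };

	write_xmit(&tx);	/* sends 2, then blocks -- no polling afterwards */
	packet_recv_ack(&tx);	/* each Ack event resumes dequeueing */
	packet_recv_ack(&tx);
	return 0;
}

Running it shows two packets going out, the sender blocking without any
further polling, and each subsequent Ack event resuming transmission.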

--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c

@@ -123,12 +123,9 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx)
 
 static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
-	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
-
-	if (hctx->pipe < hctx->cwnd)
-		return 0;
-
-	return 1; /* XXX CCID should dequeue when ready instead of polling */
+	if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
+		return CCID_PACKET_WILL_DEQUEUE_LATER;
+	return CCID_PACKET_SEND_AT_ONCE;
 }
 
 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
@@ -168,6 +165,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
 	long s;
 
 	bh_lock_sock(sk);
@@ -187,8 +185,6 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	if (s > 60)
 		hctx->rto = 60 * HZ;
 
-	ccid2_start_rto_timer(sk);
-
 	/* adjust pipe, cwnd etc */
 	hctx->ssthresh = hctx->cwnd / 2;
 	if (hctx->ssthresh < 2)
@@ -205,6 +201,11 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	hctx->rpdupack = -1;
 	ccid2_change_l_ack_ratio(sk, 1);
 	ccid2_hc_tx_check_sanity(hctx);
+
+	/* if we were blocked before, we may now send cwnd=1 packet */
+	if (sender_was_blocked)
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+	ccid2_start_rto_timer(sk);
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -455,6 +456,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
 	struct dccp_ackvec_parsed *avp;
 	u64 ackno, seqno;
 	struct ccid2_seq *seqp;
@@ -640,6 +642,9 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	}
 
 	ccid2_hc_tx_check_sanity(hctx);
 done:
+	/* check if incoming Acks allow pending packets to be sent */
+	if (sender_was_blocked && !ccid2_cwnd_network_limited(hctx))
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
 	dccp_ackvec_parsed_cleanup(&hctx->av_chunks);
 }
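Note the asymmetry between the two scheduling sites added above:
ccid2_hc_tx_rto_expire() kicks dccps_xmitlet whenever the sender was blocked,
without re-testing the predicate, since after the timeout the handler has
adjusted pipe and cwnd such that one packet may go out (the "cwnd=1 packet"
of the in-line comment); ccid2_hc_tx_packet_recv() additionally requires
!ccid2_cwnd_network_limited(hctx), because an incoming Ack does not
necessarily open the congestion window.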

--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h

@@ -70,6 +70,11 @@ struct ccid2_hc_tx_sock {
 	struct list_head av_chunks;
 };
 
+static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hctx)
+{
+	return (hctx->pipe >= hctx->cwnd);
+}
+
 struct ccid2_hc_rx_sock {
 	int data;
 };