rxrpc: Fix call timeouts
Fix the rxrpc call expiration timeouts and make them settable from
userspace.  By analogy with other rx implementations, there should be
three timeouts:

 (1) "Normal timeout"

     This is set for all calls and is triggered if we haven't received
     any packets from the peer in a while.  It is measured from the last
     time we received any packet on that call.  This is not reset by any
     connection packets (such as CHALLENGE/RESPONSE packets).

     If a service operation takes a long time, the server should
     generate PING ACKs at a duration that's substantially less than the
     normal timeout so as to keep both sides alive.  This is set at 1/6
     of the normal timeout.

 (2) "Idle timeout"

     This is set only for a service call and is triggered if we stop
     receiving the DATA packets that comprise the request data.  It is
     measured from the last time we received a DATA packet.

 (3) "Hard timeout"

     This can be set for a call and specifies the maximum lifetime of
     that call.  It should not be specified by default.  Some operations
     (such as volume transfer) take a long time.

Allow userspace to set/change the timeouts on a call with sendmsg, using
a control message:

	RXRPC_SET_CALL_TIMEOUTS

The data to the message is a number of 32-bit words, not all of which
need be given:

	u32 hard_timeout;	/* sec from first packet */
	u32 idle_timeout;	/* msec from packet Rx */
	u32 normal_timeout;	/* msec from data Rx */

This can be set in combination with any other sendmsg() that affects a
call.

Signed-off-by: David Howells <dhowells@redhat.com>
commit a158bdd324
parent 4812417894
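For illustration only (not part of this patch): a minimal userspace sketch of how the RXRPC_SET_CALL_TIMEOUT control message added below might be attached to a sendmsg() call on an AF_RXRPC socket.  The struct rxrpc_call_timeouts type and the set_call_timeouts() helper are invented for the example, and SOL_RXRPC is assumed to carry the kernel's usual value (272); only RXRPC_SET_CALL_TIMEOUT itself comes from the patch.

	/* Hypothetical example, not from the patch: set per-call timeouts via cmsg. */
	#include <stdint.h>
	#include <string.h>
	#include <sys/types.h>
	#include <sys/socket.h>
	#include <linux/rxrpc.h>

	#ifndef SOL_RXRPC
	#define SOL_RXRPC 272		/* assumed: matches include/linux/socket.h */
	#endif

	struct rxrpc_call_timeouts {	/* illustrative layout of the three 32-bit words */
		uint32_t hard;		/* sec from first packet */
		uint32_t idle;		/* msec from packet Rx */
		uint32_t normal;	/* msec from data Rx */
	};

	static ssize_t set_call_timeouts(int fd, struct msghdr *msg,
					 const struct rxrpc_call_timeouts *t)
	{
		char ctl[CMSG_SPACE(sizeof(*t))];
		struct cmsghdr *cmsg;

		memset(ctl, 0, sizeof(ctl));
		msg->msg_control = ctl;
		msg->msg_controllen = sizeof(ctl);

		cmsg = CMSG_FIRSTHDR(msg);
		cmsg->cmsg_level = SOL_RXRPC;
		cmsg->cmsg_type = RXRPC_SET_CALL_TIMEOUT;
		cmsg->cmsg_len = CMSG_LEN(sizeof(*t));
		memcpy(CMSG_DATA(cmsg), t, sizeof(*t));

		/* A real caller would combine this with the other control
		 * messages (e.g. RXRPC_USER_CALL_ID) it already sends on
		 * the call.
		 */
		return sendmsg(fd, msg, 0);
	}

The kernel side length-checks the words and converts them to jiffies; see the rxrpc_sendmsg_cmsg() and rxrpc_do_sendmsg() hunks in the diff below.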
@@ -138,10 +138,20 @@ enum rxrpc_rtt_rx_trace {
 
 enum rxrpc_timer_trace {
 	rxrpc_timer_begin,
+	rxrpc_timer_exp_ack,
+	rxrpc_timer_exp_hard,
+	rxrpc_timer_exp_idle,
+	rxrpc_timer_exp_normal,
+	rxrpc_timer_exp_ping,
+	rxrpc_timer_exp_resend,
 	rxrpc_timer_expired,
 	rxrpc_timer_init_for_reply,
 	rxrpc_timer_init_for_send_reply,
+	rxrpc_timer_restart,
 	rxrpc_timer_set_for_ack,
+	rxrpc_timer_set_for_hard,
+	rxrpc_timer_set_for_idle,
+	rxrpc_timer_set_for_normal,
 	rxrpc_timer_set_for_ping,
 	rxrpc_timer_set_for_resend,
 	rxrpc_timer_set_for_send,
@@ -296,12 +306,22 @@ enum rxrpc_congest_change {
 #define rxrpc_timer_traces \
 	EM(rxrpc_timer_begin,			"Begin ") \
 	EM(rxrpc_timer_expired,			"*EXPR*") \
+	EM(rxrpc_timer_exp_ack,			"ExpAck") \
+	EM(rxrpc_timer_exp_hard,		"ExpHrd") \
+	EM(rxrpc_timer_exp_idle,		"ExpIdl") \
+	EM(rxrpc_timer_exp_normal,		"ExpNml") \
+	EM(rxrpc_timer_exp_ping,		"ExpPng") \
+	EM(rxrpc_timer_exp_resend,		"ExpRsn") \
 	EM(rxrpc_timer_init_for_reply,		"IniRpl") \
 	EM(rxrpc_timer_init_for_send_reply,	"SndRpl") \
+	EM(rxrpc_timer_restart,			"Restrt") \
 	EM(rxrpc_timer_set_for_ack,		"SetAck") \
+	EM(rxrpc_timer_set_for_hard,		"SetHrd") \
+	EM(rxrpc_timer_set_for_idle,		"SetIdl") \
+	EM(rxrpc_timer_set_for_normal,		"SetNml") \
 	EM(rxrpc_timer_set_for_ping,		"SetPng") \
 	EM(rxrpc_timer_set_for_resend,		"SetRTx") \
-	E_(rxrpc_timer_set_for_send,		"SetTx ")
+	E_(rxrpc_timer_set_for_send,		"SetSnd")
 
 #define rxrpc_propose_ack_traces \
 	EM(rxrpc_propose_ack_client_tx_end,	"ClTxEnd") \
@@ -932,39 +952,44 @@ TRACE_EVENT(rxrpc_rtt_rx,
 
 TRACE_EVENT(rxrpc_timer,
 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-		     ktime_t now, unsigned long now_j),
+		     unsigned long now),
 
-	    TP_ARGS(call, why, now, now_j),
+	    TP_ARGS(call, why, now),
 
 	    TP_STRUCT__entry(
 		    __field(struct rxrpc_call *,	call		)
 		    __field(enum rxrpc_timer_trace,	why		)
-		    __field_struct(ktime_t,		now		)
-		    __field_struct(ktime_t,		expire_at	)
-		    __field_struct(ktime_t,		ack_at		)
-		    __field_struct(ktime_t,		resend_at	)
-		    __field(unsigned long,		now_j		)
-		    __field(unsigned long,		timer		)
+		    __field(long,			now		)
+		    __field(long,			ack_at		)
+		    __field(long,			resend_at	)
+		    __field(long,			ping_at		)
+		    __field(long,			expect_rx_by	)
+		    __field(long,			expect_req_by	)
+		    __field(long,			expect_term_by	)
+		    __field(long,			timer		)
			     ),
 
 	    TP_fast_assign(
		    __entry->call	= call;
		    __entry->why	= why;
		    __entry->now	= now;
-		    __entry->expire_at	= call->expire_at;
-		    __entry->ack_at	= call->ack_at;
-		    __entry->resend_at	= call->resend_at;
-		    __entry->now_j	= now_j;
-		    __entry->timer	= call->timer.expires;
+		    __entry->ack_at		= call->ack_at;
+		    __entry->resend_at		= call->resend_at;
+		    __entry->expect_rx_by	= call->expect_rx_by;
+		    __entry->expect_req_by	= call->expect_req_by;
+		    __entry->expect_term_by	= call->expect_term_by;
+		    __entry->timer		= call->timer.expires;
			   ),
 
-	    TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+	    TP_printk("c=%p %s a=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
		      __entry->call,
		      __print_symbolic(__entry->why, rxrpc_timer_traces),
-		      ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
-		      ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
-		      ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
-		      __entry->timer - __entry->now_j)
+		      __entry->ack_at - __entry->now,
+		      __entry->resend_at - __entry->now,
+		      __entry->expect_rx_by - __entry->now,
+		      __entry->expect_req_by - __entry->now,
+		      __entry->expect_term_by - __entry->now,
+		      __entry->timer - __entry->now)
	    );
 
 TRACE_EVENT(rxrpc_rx_lose,
@@ -59,6 +59,7 @@ enum rxrpc_cmsg_type {
 	RXRPC_EXCLUSIVE_CALL	= 10,	/* s-: Call should be on exclusive connection */
 	RXRPC_UPGRADE_SERVICE	= 11,	/* s-: Request service upgrade for client call */
 	RXRPC_TX_LENGTH		= 12,	/* s-: Total length of Tx data */
+	RXRPC_SET_CALL_TIMEOUT	= 13,	/* s-: Set one or more call timeouts */
 	RXRPC__SUPPORTED
 };
 
@@ -468,9 +468,9 @@ enum rxrpc_call_flag {
 enum rxrpc_call_event {
 	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
 	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
-	RXRPC_CALL_EV_TIMER,		/* Timer expired */
 	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
 	RXRPC_CALL_EV_PING,		/* Ping send required */
+	RXRPC_CALL_EV_EXPIRED,		/* Expiry occurred */
 };
 
 /*
@@ -514,10 +514,14 @@ struct rxrpc_call {
 	struct rxrpc_peer	*peer;		/* Peer record for remote address */
 	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
 	struct mutex		user_mutex;	/* User access mutex */
-	ktime_t			ack_at;		/* When deferred ACK needs to happen */
-	ktime_t			resend_at;	/* When next resend needs to happen */
-	ktime_t			ping_at;	/* When next to send a ping */
-	ktime_t			expire_at;	/* When the call times out */
+	unsigned long		ack_at;		/* When deferred ACK needs to happen */
+	unsigned long		resend_at;	/* When next resend needs to happen */
+	unsigned long		ping_at;	/* When next to send a ping */
+	unsigned long		expect_rx_by;	/* When we expect to get a packet by */
+	unsigned long		expect_req_by;	/* When we expect to get a request DATA packet by */
+	unsigned long		expect_term_by;	/* When we expect call termination by */
+	u32			next_rx_timo;	/* Timeout for next Rx packet (jif) */
+	u32			next_req_timo;	/* Timeout for next Rx request packet (jif) */
 	struct timer_list	timer;		/* Combined event timer */
 	struct work_struct	processor;	/* Event processor */
 	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
@@ -697,12 +701,19 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * call_event.c
  */
-void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
		       enum rxrpc_propose_ack_trace);
 void rxrpc_process_call(struct work_struct *);
 
+static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+					   unsigned long expire_at,
+					   unsigned long now,
+					   enum rxrpc_timer_trace why)
+{
+	trace_rxrpc_timer(call, why, now);
+	timer_reduce(&call->timer, expire_at);
+}
+
 /*
  * call_object.c
  */
@@ -843,8 +854,8 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
  */
 extern unsigned int rxrpc_max_client_connections;
 extern unsigned int rxrpc_reap_client_connections;
-extern unsigned int rxrpc_conn_idle_client_expiry;
-extern unsigned int rxrpc_conn_idle_client_fast_expiry;
+extern unsigned long rxrpc_conn_idle_client_expiry;
+extern unsigned long rxrpc_conn_idle_client_fast_expiry;
 extern struct idr rxrpc_client_conn_ids;
 
 void rxrpc_destroy_client_conn_ids(void);
@@ -976,13 +987,13 @@ static inline void rxrpc_queue_local(struct rxrpc_local *local)
  * misc.c
  */
 extern unsigned int rxrpc_max_backlog __read_mostly;
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned long rxrpc_requested_ack_delay;
+extern unsigned long rxrpc_soft_ack_delay;
+extern unsigned long rxrpc_idle_ack_delay;
 extern unsigned int rxrpc_rx_window_size;
 extern unsigned int rxrpc_rx_mtu;
 extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned int rxrpc_resend_timeout;
+extern unsigned long rxrpc_resend_timeout;
 
 extern const s8 rxrpc_ack_priority[];
 
@@ -21,80 +21,6 @@
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-/*
- * Set the timer
- */
-void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-		       ktime_t now)
-{
-	unsigned long t_j, now_j = jiffies;
-	ktime_t t;
-	bool queue = false;
-
-	if (call->state < RXRPC_CALL_COMPLETE) {
-		t = call->expire_at;
-		if (!ktime_after(t, now)) {
-			trace_rxrpc_timer(call, why, now, now_j);
-			queue = true;
-			goto out;
-		}
-
-		if (!ktime_after(call->resend_at, now)) {
-			call->resend_at = call->expire_at;
-			if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
-				queue = true;
-		} else if (ktime_before(call->resend_at, t)) {
-			t = call->resend_at;
-		}
-
-		if (!ktime_after(call->ack_at, now)) {
-			call->ack_at = call->expire_at;
-			if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
-				queue = true;
-		} else if (ktime_before(call->ack_at, t)) {
-			t = call->ack_at;
-		}
-
-		if (!ktime_after(call->ping_at, now)) {
-			call->ping_at = call->expire_at;
-			if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
-				queue = true;
-		} else if (ktime_before(call->ping_at, t)) {
-			t = call->ping_at;
-		}
-
-		t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
-		t_j += jiffies;
-
-		/* We have to make sure that the calculated jiffies value falls
-		 * at or after the nsec value, or we may loop ceaselessly
-		 * because the timer times out, but we haven't reached the nsec
-		 * timeout yet.
-		 */
-		t_j++;
-
-		if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
-			mod_timer(&call->timer, t_j);
-			trace_rxrpc_timer(call, why, now, now_j);
-		}
-	}
-
-out:
-	if (queue)
-		rxrpc_queue_call(call);
-}
-
-/*
- * Set the timer
- */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-		     ktime_t now)
-{
-	read_lock_bh(&call->state_lock);
-	__rxrpc_set_timer(call, why, now);
-	read_unlock_bh(&call->state_lock);
-}
-
 /*
  * Propose a PING ACK be sent.
  */
@@ -106,12 +32,13 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
			rxrpc_queue_call(call);
 	} else {
-		ktime_t now = ktime_get_real();
-		ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);
+		unsigned long now = jiffies;
+		unsigned long ping_at = now + rxrpc_idle_ack_delay;
 
-		if (ktime_before(ping_at, call->ping_at)) {
-			call->ping_at = ping_at;
-			rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
+		if (time_before(ping_at, call->ping_at)) {
+			WRITE_ONCE(call->ping_at, ping_at);
+			rxrpc_reduce_call_timer(call, ping_at, now,
+						rxrpc_timer_set_for_ping);
		}
	}
 }
@@ -125,8 +52,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
				enum rxrpc_propose_ack_trace why)
 {
 	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
-	unsigned int expiry = rxrpc_soft_ack_delay;
-	ktime_t now, ack_at;
+	unsigned long now, ack_at, expiry = rxrpc_soft_ack_delay;
 	s8 prior = rxrpc_ack_priority[ack_reason];
 
 	/* Pings are handled specially because we don't want to accidentally
@@ -190,11 +116,12 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		    background)
			rxrpc_queue_call(call);
 	} else {
-		now = ktime_get_real();
-		ack_at = ktime_add_ms(now, expiry);
-		if (ktime_before(ack_at, call->ack_at)) {
-			call->ack_at = ack_at;
-			rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
+		now = jiffies;
+		ack_at = jiffies + expiry;
+		if (time_before(ack_at, call->ack_at)) {
+			WRITE_ONCE(call->ack_at, ack_at);
+			rxrpc_reduce_call_timer(call, ack_at, now,
+						rxrpc_timer_set_for_ack);
		}
	}
 
@@ -227,18 +154,20 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 /*
  * Perform retransmission of NAK'd and unack'd packets.
  */
-static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
+static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 {
 	struct rxrpc_skb_priv *sp;
 	struct sk_buff *skb;
+	unsigned long resend_at;
 	rxrpc_seq_t cursor, seq, top;
-	ktime_t max_age, oldest, ack_ts;
+	ktime_t now, max_age, oldest, ack_ts;
 	int ix;
 	u8 annotation, anno_type, retrans = 0, unacked = 0;
 
 	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
 
-	max_age = ktime_sub_ms(now, rxrpc_resend_timeout);
+	now = ktime_get_real();
+	max_age = ktime_sub_ms(now, rxrpc_resend_timeout * 1000 / HZ);
 
 	spin_lock_bh(&call->lock);
 
@@ -282,7 +211,9 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
			       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
	}
 
-	call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
+	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
+	resend_at += jiffies + rxrpc_resend_timeout;
+	WRITE_ONCE(call->resend_at, resend_at);
 
 	if (unacked)
		rxrpc_congestion_timeout(call);
@@ -292,7 +223,8 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
	 * retransmitting data.
	 */
	if (!retrans) {
-		rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+		rxrpc_reduce_call_timer(call, resend_at, now,
+					rxrpc_timer_set_for_resend);
		spin_unlock_bh(&call->lock);
		ack_ts = ktime_sub(now, call->acks_latest_ts);
		if (ktime_to_ns(ack_ts) < call->peer->rtt)
@@ -364,7 +296,7 @@ void rxrpc_process_call(struct work_struct *work)
 {
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
-	ktime_t now;
+	unsigned long now, next, t;
 
	rxrpc_see_call(call);
 
@@ -384,8 +316,50 @@ recheck_state:
		goto out_put;
	}
 
-	now = ktime_get_real();
-	if (ktime_before(call->expire_at, now)) {
+	/* Work out if any timeouts tripped */
+	now = jiffies;
+	t = READ_ONCE(call->expect_rx_by);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
+		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+	}
+
+	t = READ_ONCE(call->expect_req_by);
+	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+	    time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
+		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+	}
+
+	t = READ_ONCE(call->expect_term_by);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
+		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+	}
+
+	t = READ_ONCE(call->ack_at);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
+		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
+		set_bit(RXRPC_CALL_EV_ACK, &call->events);
+	}
+
+	t = READ_ONCE(call->ping_at);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
+		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
+		set_bit(RXRPC_CALL_EV_PING, &call->events);
+	}
+
+	t = READ_ONCE(call->resend_at);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
+		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
+	}
+
+	/* Process events */
+	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
		rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
@@ -408,7 +382,22 @@ recheck_state:
		goto recheck_state;
	}
 
-	rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+	/* Make sure the timer is restarted */
+	next = call->expect_rx_by;
+
+#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+
+	set(call->expect_req_by);
+	set(call->expect_term_by);
+	set(call->ack_at);
+	set(call->resend_at);
+	set(call->ping_at);
+
+	now = jiffies;
+	if (time_after_eq(now, next))
+		goto recheck_state;
+
+	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
 
	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE) {
@@ -51,8 +51,10 @@ static void rxrpc_call_timer_expired(unsigned long _call)
 
	_enter("%d", call->debug_id);
 
-	if (call->state < RXRPC_CALL_COMPLETE)
-		rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+	if (call->state < RXRPC_CALL_COMPLETE) {
+		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+		rxrpc_queue_call(call);
+	}
 }
 
 static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
@@ -139,6 +141,8 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->tx_total_len = -1;
+	call->next_rx_timo = 20 * HZ;
+	call->next_req_timo = 1 * HZ;
 
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));
 
@@ -189,15 +193,16 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
  */
 static void rxrpc_start_call_timer(struct rxrpc_call *call)
 {
-	ktime_t now = ktime_get_real(), expire_at;
+	unsigned long now = jiffies;
+	unsigned long j = now + MAX_JIFFY_OFFSET;
 
-	expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
-	call->expire_at = expire_at;
-	call->ack_at = expire_at;
-	call->ping_at = expire_at;
-	call->resend_at = expire_at;
-	call->timer.expires = jiffies + LONG_MAX / 2;
-	rxrpc_set_timer(call, rxrpc_timer_begin, now);
+	call->ack_at = j;
+	call->resend_at = j;
+	call->ping_at = j;
+	call->expect_rx_by = j;
+	call->expect_req_by = j;
+	call->expect_term_by = j;
+	call->timer.expires = now;
 }
 
 /*
@@ -85,8 +85,8 @@
 
 __read_mostly unsigned int rxrpc_max_client_connections = 1000;
 __read_mostly unsigned int rxrpc_reap_client_connections = 900;
-__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
-__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
 
 /*
  * We use machine-unique IDs for our client connections.
@@ -318,16 +318,18 @@ bad_state:
 static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 {
	struct rxrpc_ack_summary summary = { 0 };
+	unsigned long now, timo;
	rxrpc_seq_t top = READ_ONCE(call->tx_top);
 
	if (call->ackr_reason) {
		spin_lock_bh(&call->lock);
		call->ackr_reason = 0;
-		call->resend_at = call->expire_at;
-		call->ack_at = call->expire_at;
		spin_unlock_bh(&call->lock);
-		rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
-				ktime_get_real());
+		now = jiffies;
+		timo = now + MAX_JIFFY_OFFSET;
+		WRITE_ONCE(call->resend_at, timo);
+		WRITE_ONCE(call->ack_at, timo);
+		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
	}
 
	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
@@ -437,6 +439,19 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
	if (state >= RXRPC_CALL_COMPLETE)
		return;
 
+	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+		unsigned long timo = READ_ONCE(call->next_req_timo);
+		unsigned long now, expect_req_by;
+
+		if (timo) {
+			now = jiffies;
+			expect_req_by = now + timo;
+			WRITE_ONCE(call->expect_req_by, expect_req_by);
+			rxrpc_reduce_call_timer(call, expect_req_by, now,
+						rxrpc_timer_set_for_idle);
+		}
+	}
+
	/* Received data implicitly ACKs all of the request packets we sent
	 * when we're acting as a client.
	 */
@@ -908,9 +923,20 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
				    struct sk_buff *skb, u16 skew)
 {
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	unsigned long timo;
 
	_enter("%p,%p", call, skb);
 
+	timo = READ_ONCE(call->next_rx_timo);
+	if (timo) {
+		unsigned long now = jiffies, expect_rx_by;
+
+		expect_rx_by = jiffies + timo;
+		WRITE_ONCE(call->expect_rx_by, expect_rx_by);
+		rxrpc_reduce_call_timer(call, expect_rx_by, now,
+					rxrpc_timer_set_for_normal);
+	}
+
	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
		rxrpc_input_data(call, skb, skew);
@@ -20,34 +20,29 @@
  */
 unsigned int rxrpc_max_backlog __read_mostly = 10;
 
-/*
- * Maximum lifetime of a call (in mx).
- */
-unsigned int rxrpc_max_call_lifetime = 60 * 1000;
-
 /*
  * How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in ms).
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
  */
-unsigned int rxrpc_requested_ack_delay = 1;
+unsigned long rxrpc_requested_ack_delay = 1;
 
 /*
- * How long to wait before scheduling an ACK with subtype DELAY (in ms).
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
  *
  * We use this when we've received new data packets.  If those packets aren't
  * all consumed within this time we will send a DELAY ACK if an ACK was not
  * requested to let the sender know it doesn't need to resend.
  */
-unsigned int rxrpc_soft_ack_delay = 1 * 1000;
+unsigned long rxrpc_soft_ack_delay = HZ;
 
 /*
- * How long to wait before scheduling an ACK with subtype IDLE (in ms).
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
  *
  * We use this when we've consumed some previously soft-ACK'd packets when
  * further packets aren't immediately received to decide when to send an IDLE
  * ACK let the other end know that it can free up its Tx buffer space.
  */
-unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
+unsigned long rxrpc_idle_ack_delay = HZ / 2;
 
 /*
  * Receive window size in packets.  This indicates the maximum number of
@@ -75,7 +70,7 @@ unsigned int rxrpc_rx_jumbo_max = 4;
 /*
  * Time till packet resend (in milliseconds).
  */
-unsigned int rxrpc_resend_timeout = 4 * 1000;
+unsigned long rxrpc_resend_timeout = 4 * HZ;
 
 const s8 rxrpc_ack_priority[] = {
	[0] = 0,
@@ -163,7 +163,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
	case RXRPC_CALL_SERVER_RECV_REQUEST:
		call->tx_phase = true;
		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
-		call->ack_at = call->expire_at;
+		call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
		write_unlock_bh(&call->state_lock);
		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
				  rxrpc_propose_ack_processing_op);
@@ -158,6 +158,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			       rxrpc_notify_end_tx_t notify_end_tx)
 {
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	unsigned long now;
	rxrpc_seq_t seq = sp->hdr.seq;
	int ret, ix;
	u8 annotation = RXRPC_TX_ANNO_UNACK;
@@ -197,11 +198,11 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
-			call->ack_at = call->expire_at;
+			now = jiffies;
+			WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
			if (call->ackr_reason == RXRPC_ACK_DELAY)
				call->ackr_reason = 0;
-			__rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
-					  ktime_get_real());
+			trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
			if (!last)
				break;
			/* Fall through */
@@ -223,14 +224,12 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			_debug("need instant resend %d", ret);
			rxrpc_instant_resend(call, ix);
		} else {
-			ktime_t now = ktime_get_real(), resend_at;
+			unsigned long now = jiffies, resend_at;
 
-			resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
-			if (ktime_before(resend_at, call->resend_at)) {
-				call->resend_at = resend_at;
-				rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
-			}
+			resend_at = now + rxrpc_resend_timeout;
+			WRITE_ONCE(call->resend_at, resend_at);
+			rxrpc_reduce_call_timer(call, resend_at, now,
+						rxrpc_timer_set_for_send);
		}
 
		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
@@ -513,6 +512,19 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
				return -EINVAL;
			break;
 
+		case RXRPC_SET_CALL_TIMEOUT:
+			if (len & 3 || len < 4 || len > 12)
+				return -EINVAL;
+			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
+			p->call.nr_timeouts = len / 4;
+			if (p->call.timeouts.hard > INT_MAX / HZ)
+				return -ERANGE;
+			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
+				return -ERANGE;
+			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
+				return -ERANGE;
+			break;
+
		default:
			return -EINVAL;
		}
@@ -577,11 +589,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 {
	enum rxrpc_call_state state;
	struct rxrpc_call *call;
+	unsigned long now, j;
	int ret;
 
	struct rxrpc_send_params p = {
		.call.tx_total_len	= -1,
		.call.user_call_ID	= 0,
+		.call.nr_timeouts	= 0,
		.abort_code		= 0,
		.command		= RXRPC_CMD_SEND_DATA,
		.exclusive		= false,
@@ -646,6 +660,31 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
		}
	}
 
+	switch (p.call.nr_timeouts) {
+	case 3:
+		j = msecs_to_jiffies(p.call.timeouts.normal);
+		if (p.call.timeouts.normal > 0 && j == 0)
+			j = 1;
+		WRITE_ONCE(call->next_rx_timo, j);
+		/* Fall through */
+	case 2:
+		j = msecs_to_jiffies(p.call.timeouts.idle);
+		if (p.call.timeouts.idle > 0 && j == 0)
+			j = 1;
+		WRITE_ONCE(call->next_req_timo, j);
+		/* Fall through */
+	case 1:
+		if (p.call.timeouts.hard > 0) {
+			j = msecs_to_jiffies(p.call.timeouts.hard);
+			now = jiffies;
+			j += now;
+			WRITE_ONCE(call->expect_term_by, j);
+			rxrpc_reduce_call_timer(call, j, now,
+						rxrpc_timer_set_for_hard);
+		}
+		break;
+	}
+
	state = READ_ONCE(call->state);
	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, state, call->conn);
@@ -21,6 +21,8 @@ static const unsigned int four = 4;
 static const unsigned int thirtytwo = 32;
 static const unsigned int n_65535 = 65535;
 static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
+static const unsigned long one_jiffy = 1;
+static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
 
 /*
  * RxRPC operating parameters.
@@ -29,64 +31,60 @@ static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
  * information on the individual parameters.
  */
 static struct ctl_table rxrpc_sysctl_table[] = {
-	/* Values measured in milliseconds */
+	/* Values measured in milliseconds but used in jiffies */
	{
		.procname	= "req_ack_delay",
		.data		= &rxrpc_requested_ack_delay,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-		.extra1		= (void *)&zero,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
	},
	{
		.procname	= "soft_ack_delay",
		.data		= &rxrpc_soft_ack_delay,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-		.extra1		= (void *)&one,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
	},
	{
		.procname	= "idle_ack_delay",
		.data		= &rxrpc_idle_ack_delay,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-		.extra1		= (void *)&one,
-	},
-	{
-		.procname	= "resend_timeout",
-		.data		= &rxrpc_resend_timeout,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-		.extra1		= (void *)&one,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
	},
	{
		.procname	= "idle_conn_expiry",
		.data		= &rxrpc_conn_idle_client_expiry,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
-		.proc_handler	= proc_dointvec_ms_jiffies,
-		.extra1		= (void *)&one,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
	},
	{
		.procname	= "idle_conn_fast_expiry",
		.data		= &rxrpc_conn_idle_client_fast_expiry,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
-		.proc_handler	= proc_dointvec_ms_jiffies,
-		.extra1		= (void *)&one,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
	},
-
-	/* Values measured in seconds but used in jiffies */
	{
-		.procname	= "max_call_lifetime",
-		.data		= &rxrpc_max_call_lifetime,
-		.maxlen		= sizeof(unsigned int),
+		.procname	= "resend_timeout",
+		.data		= &rxrpc_resend_timeout,
+		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-		.extra1		= (void *)&one,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
	},
 
	/* Non-time values */