2007-04-26 22:48:28 +00:00
|
|
|
/* AF_RXRPC internal definitions
|
|
|
|
*
|
|
|
|
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
|
|
|
* Written by David Howells (dhowells@redhat.com)
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
2016-04-04 13:00:32 +00:00
|
|
|
#include <linux/atomic.h>
|
2016-07-01 06:51:50 +00:00
|
|
|
#include <linux/seqlock.h>
|
2016-04-07 16:23:58 +00:00
|
|
|
#include <net/sock.h>
|
2016-04-04 13:00:32 +00:00
|
|
|
#include <net/af_rxrpc.h>
|
2007-04-26 22:48:28 +00:00
|
|
|
#include <rxrpc/packet.h>
|
|
|
|
|
|
|
|
#if 0
|
|
|
|
#define CHECK_SLAB_OKAY(X) \
|
|
|
|
BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
|
|
|
|
(POISON_FREE << 8 | POISON_FREE))
|
|
|
|
#else
|
2016-03-04 15:56:19 +00:00
|
|
|
#define CHECK_SLAB_OKAY(X) do {} while (0)
|
2007-04-26 22:48:28 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#define FCRYPT_BSIZE 8
|
|
|
|
struct rxrpc_crypt {
|
|
|
|
union {
|
|
|
|
u8 x[FCRYPT_BSIZE];
|
2008-03-29 03:08:38 +00:00
|
|
|
__be32 n[2];
|
2007-04-26 22:48:28 +00:00
|
|
|
};
|
|
|
|
} __attribute__((aligned(8)));
|
|
|
|
|
[AF_RXRPC]: Add an interface to the AF_RXRPC module for the AFS filesystem to use
Add an interface to the AF_RXRPC module so that the AFS filesystem module can
more easily make use of the services available. AFS still opens a socket but
then uses the action functions in lieu of sendmsg() and registers an intercept
functions to grab messages before they're queued on the socket Rx queue.
This permits AFS (or whatever) to:
(1) Avoid the overhead of using the recvmsg() call.
(2) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(3) Avoid calling request_key() at the point of issue of a call or opening of
a socket. This is done instead by AFS at the point of open(), unlink() or
other VFS operation and the key handed through.
(4) Request the use of something other than GFP_KERNEL to allocate memory.
Furthermore:
(*) The socket buffer markings used by RxRPC are made available for AFS so
that it can interpret the cooked RxRPC messages itself.
(*) rxgen (un)marshalling abort codes are made available.
The following documentation for the kernel interface is added to
Documentation/networking/rxrpc.txt:
=========================
AF_RXRPC KERNEL INTERFACE
=========================
The AF_RXRPC module also provides an interface for use by in-kernel utilities
such as the AFS filesystem. This permits such a utility to:
(1) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(2) Avoid having RxRPC call request_key() at the point of issue of a call or
opening of a socket. Instead the utility is responsible for requesting a
key at the appropriate point. AFS, for instance, would do this during VFS
operations such as open() or unlink(). The key is then handed through
when the call is initiated.
(3) Request the use of something other than GFP_KERNEL to allocate memory.
(4) Avoid the overhead of using the recvmsg() call. RxRPC messages can be
intercepted before they get put into the socket Rx queue and the socket
buffers manipulated directly.
To use the RxRPC facility, a kernel utility must still open an AF_RXRPC socket,
bind an addess as appropriate and listen if it's to be a server socket, but
then it passes this to the kernel interface functions.
The kernel interface functions are as follows:
(*) Begin a new client call.
struct rxrpc_call *
rxrpc_kernel_begin_call(struct socket *sock,
struct sockaddr_rxrpc *srx,
struct key *key,
unsigned long user_call_ID,
gfp_t gfp);
This allocates the infrastructure to make a new RxRPC call and assigns
call and connection numbers. The call will be made on the UDP port that
the socket is bound to. The call will go to the destination address of a
connected client socket unless an alternative is supplied (srx is
non-NULL).
If a key is supplied then this will be used to secure the call instead of
the key bound to the socket with the RXRPC_SECURITY_KEY sockopt. Calls
secured in this way will still share connections if at all possible.
The user_call_ID is equivalent to that supplied to sendmsg() in the
control data buffer. It is entirely feasible to use this to point to a
kernel data structure.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) End a client call.
void rxrpc_kernel_end_call(struct rxrpc_call *call);
This is used to end a previously begun call. The user_call_ID is expunged
from AF_RXRPC's knowledge and will not be seen again in association with
the specified call.
(*) Send data through a call.
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
size_t len);
This is used to supply either the request part of a client call or the
reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the
data buffers to be used. msg_iov may not be NULL and must point
exclusively to in-kernel virtual addresses. msg.msg_flags may be given
MSG_MORE if there will be subsequent data sends for this call.
The msg must not specify a destination address, control data or any flags
other than MSG_MORE. len is the total amount of data to transmit.
(*) Abort a call.
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code);
This is used to abort a call if it's still in an abortable state. The
abort code specified will be placed in the ABORT message sent.
(*) Intercept received RxRPC messages.
typedef void (*rxrpc_interceptor_t)(struct sock *sk,
unsigned long user_call_ID,
struct sk_buff *skb);
void
rxrpc_kernel_intercept_rx_messages(struct socket *sock,
rxrpc_interceptor_t interceptor);
This installs an interceptor function on the specified AF_RXRPC socket.
All messages that would otherwise wind up in the socket's Rx queue are
then diverted to this function. Note that care must be taken to process
the messages in the right order to maintain DATA message sequentiality.
The interceptor function itself is provided with the address of the socket
and handling the incoming message, the ID assigned by the kernel utility
to the call and the socket buffer containing the message.
The skb->mark field indicates the type of message:
MARK MEANING
=============================== =======================================
RXRPC_SKB_MARK_DATA Data message
RXRPC_SKB_MARK_FINAL_ACK Final ACK received for an incoming call
RXRPC_SKB_MARK_BUSY Client call rejected as server busy
RXRPC_SKB_MARK_REMOTE_ABORT Call aborted by peer
RXRPC_SKB_MARK_NET_ERROR Network error detected
RXRPC_SKB_MARK_LOCAL_ERROR Local error encountered
RXRPC_SKB_MARK_NEW_CALL New incoming call awaiting acceptance
The remote abort message can be probed with rxrpc_kernel_get_abort_code().
The two error messages can be probed with rxrpc_kernel_get_error_number().
A new call can be accepted with rxrpc_kernel_accept_call().
Data messages can have their contents extracted with the usual bunch of
socket buffer manipulation functions. A data message can be determined to
be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
data message has been used up, rxrpc_kernel_data_delivered() should be
called on it..
Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
of. It is possible to get extra refs on all types of message for later
freeing, but this may pin the state of a call until the message is finally
freed.
(*) Accept an incoming call.
struct rxrpc_call *
rxrpc_kernel_accept_call(struct socket *sock,
unsigned long user_call_ID);
This is used to accept an incoming call and to assign it a call ID. This
function is similar to rxrpc_kernel_begin_call() and calls accepted must
be ended in the same way.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) Reject an incoming call.
int rxrpc_kernel_reject_call(struct socket *sock);
This is used to reject the first incoming call on the socket's queue with
a BUSY message. -ENODATA is returned if there were no incoming calls.
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
(*) Record the delivery of a data message and free it.
void rxrpc_kernel_data_delivered(struct sk_buff *skb);
This is used to record a data message as having been delivered and to
update the ACK state for the call. The socket buffer will be freed.
(*) Free a message.
void rxrpc_kernel_free_skb(struct sk_buff *skb);
This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC
socket.
(*) Determine if a data message is the last one on a call.
bool rxrpc_kernel_is_data_last(struct sk_buff *skb);
This is used to determine if a socket buffer holds the last data message
to be received for a call (true will be returned if it does, false
if not).
The data message will be part of the reply on a client call and the
request on an incoming call. In the latter case there will be more
messages, but in the former case there will not.
(*) Get the abort code from an abort message.
u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb);
This is used to extract the abort code from a remote abort message.
(*) Get the error number from a local or network error message.
int rxrpc_kernel_get_error_number(struct sk_buff *skb);
This is used to extract the error number from a message indicating either
a local error occurred or a network error occurred.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-04-26 22:50:17 +00:00
|
|
|
#define rxrpc_queue_work(WS) queue_work(rxrpc_workqueue, (WS))
|
|
|
|
#define rxrpc_queue_delayed_work(WS,D) \
|
|
|
|
queue_delayed_work(rxrpc_workqueue, (WS), (D))
|
|
|
|
|
2016-04-04 13:00:37 +00:00
|
|
|
struct rxrpc_connection;
|
|
|
|
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 19:42:14 +00:00
|
|
|
/*
|
|
|
|
* Mark applied to socket buffers.
|
|
|
|
*/
|
|
|
|
enum rxrpc_skb_mark {
|
|
|
|
RXRPC_SKB_MARK_DATA, /* data message */
|
|
|
|
RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
|
|
|
|
RXRPC_SKB_MARK_BUSY, /* server busy message */
|
|
|
|
RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
|
|
|
|
RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
|
|
|
|
RXRPC_SKB_MARK_NET_ERROR, /* network error message */
|
|
|
|
RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
|
|
|
|
RXRPC_SKB_MARK_NEW_CALL, /* local error message */
|
|
|
|
};
|
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/*
|
|
|
|
* sk_state for RxRPC sockets
|
|
|
|
*/
|
|
|
|
enum {
|
2016-06-09 22:02:51 +00:00
|
|
|
RXRPC_UNBOUND = 0,
|
|
|
|
RXRPC_CLIENT_UNBOUND, /* Unbound socket used as client */
|
2007-04-26 22:48:28 +00:00
|
|
|
RXRPC_CLIENT_BOUND, /* client local address bound */
|
|
|
|
RXRPC_SERVER_BOUND, /* server local address bound */
|
|
|
|
RXRPC_SERVER_LISTENING, /* server listening for connections */
|
|
|
|
RXRPC_CLOSE, /* socket is being closed */
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RxRPC socket definition
|
|
|
|
*/
|
|
|
|
struct rxrpc_sock {
|
|
|
|
/* WARNING: sk has to be the first member */
|
|
|
|
struct sock sk;
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 19:42:14 +00:00
|
|
|
rxrpc_notify_new_call_t notify_new_call; /* Func to notify of new call */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct rxrpc_local *local; /* local endpoint */
|
2016-09-08 10:10:11 +00:00
|
|
|
struct hlist_node listen_link; /* link in the local endpoint's listen list */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct list_head secureq; /* calls awaiting connection security clearance */
|
|
|
|
struct list_head acceptq; /* calls awaiting acceptance */
|
|
|
|
struct key *key; /* security for this socket */
|
|
|
|
struct key *securities; /* list of server security descriptors */
|
|
|
|
struct rb_root calls; /* outstanding calls on this socket */
|
|
|
|
unsigned long flags;
|
2016-06-09 22:02:51 +00:00
|
|
|
#define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */
|
2007-04-26 22:48:28 +00:00
|
|
|
rwlock_t call_lock; /* lock for calls */
|
|
|
|
u32 min_sec_level; /* minimum security level */
|
|
|
|
#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
|
2016-04-04 13:00:37 +00:00
|
|
|
bool exclusive; /* Exclusive connection for a client socket */
|
|
|
|
sa_family_t family; /* Protocol family created with */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct sockaddr_rxrpc srx; /* local address */
|
2016-06-09 22:02:51 +00:00
|
|
|
struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */
|
2007-04-26 22:48:28 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
|
|
|
|
|
2016-03-04 15:53:46 +00:00
|
|
|
/*
|
|
|
|
* CPU-byteorder normalised Rx packet header.
|
|
|
|
*/
|
|
|
|
struct rxrpc_host_header {
|
|
|
|
u32 epoch; /* client boot timestamp */
|
|
|
|
u32 cid; /* connection and channel ID */
|
|
|
|
u32 callNumber; /* call ID (0 for connection-level packets) */
|
|
|
|
u32 seq; /* sequence number of pkt in call stream */
|
|
|
|
u32 serial; /* serial number of pkt sent to network */
|
|
|
|
u8 type; /* packet type */
|
|
|
|
u8 flags; /* packet flags */
|
|
|
|
u8 userStatus; /* app-layer defined status */
|
|
|
|
u8 securityIndex; /* security protocol ID */
|
|
|
|
union {
|
|
|
|
u16 _rsvd; /* reserved */
|
|
|
|
u16 cksum; /* kerberos security checksum */
|
|
|
|
};
|
|
|
|
u16 serviceId; /* service ID */
|
|
|
|
} __packed;
|
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/*
|
|
|
|
* RxRPC socket buffer private variables
|
|
|
|
* - max 48 bytes (struct sk_buff::cb)
|
|
|
|
*/
|
|
|
|
struct rxrpc_skb_priv {
|
|
|
|
struct rxrpc_call *call; /* call with which associated */
|
|
|
|
unsigned long resend_at; /* time in jiffies at which to resend */
|
|
|
|
union {
|
2012-04-15 05:58:06 +00:00
|
|
|
unsigned int offset; /* offset into buffer of next read */
|
2007-04-26 22:48:28 +00:00
|
|
|
int remain; /* amount of space remaining for next write */
|
|
|
|
u32 error; /* network error code */
|
|
|
|
bool need_resend; /* T if needs resending */
|
|
|
|
};
|
|
|
|
|
2016-03-04 15:53:46 +00:00
|
|
|
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
|
2007-04-26 22:48:28 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RxRPC security module interface
|
|
|
|
*/
|
|
|
|
struct rxrpc_security {
|
|
|
|
const char *name; /* name of this service */
|
|
|
|
u8 security_index; /* security type provided */
|
|
|
|
|
2016-04-07 16:23:51 +00:00
|
|
|
/* Initialise a security service */
|
|
|
|
int (*init)(void);
|
|
|
|
|
|
|
|
/* Clean up a security service */
|
|
|
|
void (*exit)(void);
|
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/* initialise a connection's security */
|
|
|
|
int (*init_connection_security)(struct rxrpc_connection *);
|
|
|
|
|
|
|
|
/* prime a connection's packet security */
|
2016-06-26 21:55:24 +00:00
|
|
|
int (*prime_packet_security)(struct rxrpc_connection *);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/* impose security on a packet */
|
2016-06-26 21:55:24 +00:00
|
|
|
int (*secure_packet)(struct rxrpc_call *,
|
2007-04-26 22:48:28 +00:00
|
|
|
struct sk_buff *,
|
|
|
|
size_t,
|
|
|
|
void *);
|
|
|
|
|
|
|
|
/* verify the security on a received packet */
|
2016-09-06 21:19:51 +00:00
|
|
|
int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
|
|
|
|
rxrpc_seq_t, u16);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/* issue a challenge */
|
|
|
|
int (*issue_challenge)(struct rxrpc_connection *);
|
|
|
|
|
|
|
|
/* respond to a challenge */
|
|
|
|
int (*respond_to_challenge)(struct rxrpc_connection *,
|
|
|
|
struct sk_buff *,
|
|
|
|
u32 *);
|
|
|
|
|
|
|
|
/* verify a response */
|
|
|
|
int (*verify_response)(struct rxrpc_connection *,
|
|
|
|
struct sk_buff *,
|
|
|
|
u32 *);
|
|
|
|
|
|
|
|
/* clear connection security */
|
|
|
|
void (*clear)(struct rxrpc_connection *);
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
2016-04-04 13:00:35 +00:00
|
|
|
* RxRPC local transport endpoint description
|
|
|
|
* - owned by a single AF_RXRPC socket
|
|
|
|
* - pointed to by transport socket struct sk_user_data
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
|
|
|
struct rxrpc_local {
|
2016-04-04 13:00:35 +00:00
|
|
|
struct rcu_head rcu;
|
|
|
|
atomic_t usage;
|
|
|
|
struct list_head link;
|
2007-04-26 22:48:28 +00:00
|
|
|
struct socket *socket; /* my UDP socket */
|
2016-04-04 13:00:35 +00:00
|
|
|
struct work_struct processor;
|
2016-09-08 10:10:11 +00:00
|
|
|
struct hlist_head services; /* services listening on this endpoint */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
|
|
|
|
struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */
|
|
|
|
struct sk_buff_head reject_queue; /* packets awaiting rejection */
|
2015-04-01 15:31:26 +00:00
|
|
|
struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
|
2016-06-17 14:42:35 +00:00
|
|
|
struct rb_root client_conns; /* Client connections by socket params */
|
|
|
|
spinlock_t client_conns_lock; /* Lock for client_conns */
|
2007-04-26 22:48:28 +00:00
|
|
|
spinlock_t lock; /* access lock */
|
|
|
|
rwlock_t services_lock; /* lock for services list */
|
|
|
|
int debug_id; /* debug ID for printks */
|
2016-04-04 13:00:35 +00:00
|
|
|
bool dead;
|
2007-04-26 22:48:28 +00:00
|
|
|
struct sockaddr_rxrpc srx; /* local address */
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RxRPC remote transport endpoint definition
|
2016-04-04 13:00:32 +00:00
|
|
|
* - matched by local endpoint, remote port, address and protocol type
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
|
|
|
struct rxrpc_peer {
|
2016-04-04 13:00:32 +00:00
|
|
|
struct rcu_head rcu; /* This must be first */
|
|
|
|
atomic_t usage;
|
|
|
|
unsigned long hash_key;
|
|
|
|
struct hlist_node hash_link;
|
|
|
|
struct rxrpc_local *local;
|
2016-04-04 13:00:34 +00:00
|
|
|
struct hlist_head error_targets; /* targets for net error distribution */
|
|
|
|
struct work_struct error_distributor;
|
2016-06-17 09:06:56 +00:00
|
|
|
struct rb_root service_conns; /* Service connections */
|
2016-07-01 06:51:50 +00:00
|
|
|
seqlock_t service_conn_lock;
|
2007-04-26 22:48:28 +00:00
|
|
|
spinlock_t lock; /* access lock */
|
2012-04-15 05:58:06 +00:00
|
|
|
unsigned int if_mtu; /* interface MTU for this peer */
|
|
|
|
unsigned int mtu; /* network MTU for this peer */
|
|
|
|
unsigned int maxdata; /* data size (MTU - hdrsize) */
|
2007-04-26 22:48:28 +00:00
|
|
|
unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
|
|
|
|
int debug_id; /* debug ID for printks */
|
2016-04-04 13:00:34 +00:00
|
|
|
int error_report; /* Net (+0) or local (+1000000) to distribute */
|
|
|
|
#define RXRPC_LOCAL_ERROR_OFFSET 1000000
|
2007-04-26 22:48:28 +00:00
|
|
|
struct sockaddr_rxrpc srx; /* remote address */
|
|
|
|
|
|
|
|
/* calculated RTT cache */
|
|
|
|
#define RXRPC_RTT_CACHE_SIZE 32
|
|
|
|
suseconds_t rtt; /* current RTT estimate (in uS) */
|
2012-04-15 05:58:06 +00:00
|
|
|
unsigned int rtt_point; /* next entry at which to insert */
|
|
|
|
unsigned int rtt_usage; /* amount of cache actually used */
|
2007-04-26 22:48:28 +00:00
|
|
|
suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
|
|
|
|
};
|
|
|
|
|
2016-04-04 13:00:36 +00:00
|
|
|
/*
|
|
|
|
* Keys for matching a connection.
|
|
|
|
*/
|
|
|
|
struct rxrpc_conn_proto {
|
2016-06-30 11:16:21 +00:00
|
|
|
union {
|
|
|
|
struct {
|
|
|
|
u32 epoch; /* epoch of this connection */
|
|
|
|
u32 cid; /* connection ID */
|
|
|
|
};
|
|
|
|
u64 index_key;
|
2016-04-04 13:00:36 +00:00
|
|
|
};
|
|
|
|
};
|
|
|
|
|
|
|
|
struct rxrpc_conn_parameters {
|
|
|
|
struct rxrpc_local *local; /* Representation of local endpoint */
|
|
|
|
struct rxrpc_peer *peer; /* Remote endpoint */
|
|
|
|
struct key *key; /* Security details */
|
|
|
|
bool exclusive; /* T if conn is exclusive */
|
|
|
|
u16 service_id; /* Service ID for this connection */
|
|
|
|
u32 security_level; /* Security level selected */
|
|
|
|
};
|
|
|
|
|
2016-06-27 09:32:02 +00:00
|
|
|
/*
|
|
|
|
* Bits in the connection flags.
|
|
|
|
*/
|
|
|
|
enum rxrpc_conn_flag {
|
|
|
|
RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */
|
2016-06-30 09:45:22 +00:00
|
|
|
RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */
|
|
|
|
RXRPC_CONN_IN_CLIENT_CONNS, /* Conn is in local->client_conns */
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
RXRPC_CONN_EXPOSED, /* Conn has extra ref for exposure */
|
|
|
|
RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
|
|
|
|
RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */
|
2016-06-27 09:32:02 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Events that can be raised upon a connection.
|
|
|
|
*/
|
|
|
|
enum rxrpc_conn_event {
|
|
|
|
RXRPC_CONN_EV_CHALLENGE, /* Send challenge packet */
|
|
|
|
};
|
|
|
|
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
/*
|
|
|
|
* The connection cache state.
|
|
|
|
*/
|
|
|
|
enum rxrpc_conn_cache_state {
|
|
|
|
RXRPC_CONN_CLIENT_INACTIVE, /* Conn is not yet listed */
|
|
|
|
RXRPC_CONN_CLIENT_WAITING, /* Conn is on wait list, waiting for capacity */
|
|
|
|
RXRPC_CONN_CLIENT_ACTIVE, /* Conn is on active list, doing calls */
|
|
|
|
RXRPC_CONN_CLIENT_CULLED, /* Conn is culled and delisted, doing calls */
|
|
|
|
RXRPC_CONN_CLIENT_IDLE, /* Conn is on idle list, doing mostly nothing */
|
|
|
|
};
|
|
|
|
|
2016-06-27 09:32:02 +00:00
|
|
|
/*
|
|
|
|
* The connection protocol state.
|
|
|
|
*/
|
|
|
|
enum rxrpc_conn_proto_state {
|
|
|
|
RXRPC_CONN_UNUSED, /* Connection not yet attempted */
|
|
|
|
RXRPC_CONN_CLIENT, /* Client connection */
|
|
|
|
RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */
|
|
|
|
RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
|
|
|
|
RXRPC_CONN_SERVICE, /* Service secured connection */
|
|
|
|
RXRPC_CONN_REMOTELY_ABORTED, /* Conn aborted by peer */
|
|
|
|
RXRPC_CONN_LOCALLY_ABORTED, /* Conn aborted locally */
|
|
|
|
RXRPC_CONN__NR_STATES
|
|
|
|
};
|
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/*
|
|
|
|
* RxRPC connection definition
|
2016-06-17 09:06:56 +00:00
|
|
|
* - matched by { local, peer, epoch, conn_id, direction }
|
2007-04-26 22:48:28 +00:00
|
|
|
* - each connection can only handle four simultaneous calls
|
|
|
|
*/
|
|
|
|
struct rxrpc_connection {
|
2016-04-04 13:00:36 +00:00
|
|
|
struct rxrpc_conn_proto proto;
|
|
|
|
struct rxrpc_conn_parameters params;
|
|
|
|
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
atomic_t usage;
|
|
|
|
struct rcu_head rcu;
|
|
|
|
struct list_head cache_link;
|
rxrpc: Call channels should have separate call number spaces
Each channel on a connection has a separate, independent number space from
which to allocate callNumber values. It is entirely possible, for example,
to have a connection with four active calls, each with call number 1.
Note that the callNumber values for any particular channel don't have to
start at 1, but they are supposed to increment monotonically for that
channel from a client's perspective and may not be reused once the call
number is transmitted (until the epoch cycles all the way back round).
Currently, however, call numbers are allocated on a per-connection basis
and, further, are held in an rb-tree. The rb-tree is redundant as the four
channel pointers in the rxrpc_connection struct are entirely capable of
pointing to all the calls currently in progress on a connection.
To this end, make the following changes:
(1) Handle call number allocation independently per channel.
(2) Get rid of the conn->calls rb-tree. This is overkill as a connection
may have a maximum of four calls in progress at any one time. Use the
pointers in the channels[] array instead, indexed by the channel
number from the packet.
(3) For each channel, save the result of the last call that was in
progress on that channel in conn->channels[] so that the final ACK or
ABORT packet can be replayed if necessary. Any call earlier than that
is just ignored. If we've seen the next call number in a packet, the
last one is most definitely defunct.
(4) When generating a RESPONSE packet for a connection, the call number
counter for each channel must be included in it.
(5) When parsing a RESPONSE packet for a connection, the call number
counters contained therein should be used to set the minimum expected
call numbers on each channel.
To do in future commits:
(1) Replay terminal packets based on the last call stored in
conn->channels[].
(2) Connections should be retired before the callNumber space on any
channel runs out.
(3) A server is expected to disregard or reject any new incoming call that
has a call number less than the current call number counter. The call
number counter for that channel must be advanced to the new call
number.
Note that the server cannot just require that the next call that it
sees on a channel be exactly the call number counter + 1 because then
there's a scenario that could cause a problem: The client transmits a
packet to initiate a connection, the network goes out, the server
sends an ACK (which gets lost), the client sends an ABORT (which also
gets lost); the network then reconnects, the client then reuses the
call number for the next call (it doesn't know the server already saw
the call number), but the server thinks it already has the first
packet of this call (it doesn't know that the client doesn't know that
it saw the call number the first time).
Signed-off-by: David Howells <dhowells@redhat.com>
2016-06-27 13:39:44 +00:00
|
|
|
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
spinlock_t channel_lock;
|
|
|
|
unsigned char active_chans; /* Mask of active channels */
|
|
|
|
#define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1)
|
|
|
|
struct list_head waiting_calls; /* Calls waiting for channels */
|
rxrpc: Call channels should have separate call number spaces
Each channel on a connection has a separate, independent number space from
which to allocate callNumber values. It is entirely possible, for example,
to have a connection with four active calls, each with call number 1.
Note that the callNumber values for any particular channel don't have to
start at 1, but they are supposed to increment monotonically for that
channel from a client's perspective and may not be reused once the call
number is transmitted (until the epoch cycles all the way back round).
Currently, however, call numbers are allocated on a per-connection basis
and, further, are held in an rb-tree. The rb-tree is redundant as the four
channel pointers in the rxrpc_connection struct are entirely capable of
pointing to all the calls currently in progress on a connection.
To this end, make the following changes:
(1) Handle call number allocation independently per channel.
(2) Get rid of the conn->calls rb-tree. This is overkill as a connection
may have a maximum of four calls in progress at any one time. Use the
pointers in the channels[] array instead, indexed by the channel
number from the packet.
(3) For each channel, save the result of the last call that was in
progress on that channel in conn->channels[] so that the final ACK or
ABORT packet can be replayed if necessary. Any call earlier than that
is just ignored. If we've seen the next call number in a packet, the
last one is most definitely defunct.
(4) When generating a RESPONSE packet for a connection, the call number
counter for each channel must be included in it.
(5) When parsing a RESPONSE packet for a connection, the call number
counters contained therein should be used to set the minimum expected
call numbers on each channel.
To do in future commits:
(1) Replay terminal packets based on the last call stored in
conn->channels[].
(2) Connections should be retired before the callNumber space on any
channel runs out.
(3) A server is expected to disregard or reject any new incoming call that
has a call number less than the current call number counter. The call
number counter for that channel must be advanced to the new call
number.
Note that the server cannot just require that the next call that it
sees on a channel be exactly the call number counter + 1 because then
there's a scenario that could cause a problem: The client transmits a
packet to initiate a connection, the network goes out, the server
sends an ACK (which gets lost), the client sends an ABORT (which also
gets lost); the network then reconnects, the client then reuses the
call number for the next call (it doesn't know the server already saw
the call number), but the server thinks it already has the first
packet of this call (it doesn't know that the client doesn't know that
it saw the call number the first time).
Signed-off-by: David Howells <dhowells@redhat.com>
2016-06-27 13:39:44 +00:00
|
|
|
struct rxrpc_channel {
|
|
|
|
struct rxrpc_call __rcu *call; /* Active call */
|
|
|
|
u32 call_id; /* ID of current call */
|
|
|
|
u32 call_counter; /* Call ID counter */
|
|
|
|
u32 last_call; /* ID of last call */
|
2016-08-23 14:27:25 +00:00
|
|
|
u8 last_type; /* Type of last packet */
|
|
|
|
u16 last_service_id;
|
|
|
|
union {
|
|
|
|
u32 last_seq;
|
|
|
|
u32 last_abort;
|
|
|
|
};
|
rxrpc: Call channels should have separate call number spaces
Each channel on a connection has a separate, independent number space from
which to allocate callNumber values. It is entirely possible, for example,
to have a connection with four active calls, each with call number 1.
Note that the callNumber values for any particular channel don't have to
start at 1, but they are supposed to increment monotonically for that
channel from a client's perspective and may not be reused once the call
number is transmitted (until the epoch cycles all the way back round).
Currently, however, call numbers are allocated on a per-connection basis
and, further, are held in an rb-tree. The rb-tree is redundant as the four
channel pointers in the rxrpc_connection struct are entirely capable of
pointing to all the calls currently in progress on a connection.
To this end, make the following changes:
(1) Handle call number allocation independently per channel.
(2) Get rid of the conn->calls rb-tree. This is overkill as a connection
may have a maximum of four calls in progress at any one time. Use the
pointers in the channels[] array instead, indexed by the channel
number from the packet.
(3) For each channel, save the result of the last call that was in
progress on that channel in conn->channels[] so that the final ACK or
ABORT packet can be replayed if necessary. Any call earlier than that
is just ignored. If we've seen the next call number in a packet, the
last one is most definitely defunct.
(4) When generating a RESPONSE packet for a connection, the call number
counter for each channel must be included in it.
(5) When parsing a RESPONSE packet for a connection, the call number
counters contained therein should be used to set the minimum expected
call numbers on each channel.
To do in future commits:
(1) Replay terminal packets based on the last call stored in
conn->channels[].
(2) Connections should be retired before the callNumber space on any
channel runs out.
(3) A server is expected to disregard or reject any new incoming call that
has a call number less than the current call number counter. The call
number counter for that channel must be advanced to the new call
number.
Note that the server cannot just require that the next call that it
sees on a channel be exactly the call number counter + 1 because then
there's a scenario that could cause a problem: The client transmits a
packet to initiate a connection, the network goes out, the server
sends an ACK (which gets lost), the client sends an ABORT (which also
gets lost); the network then reconnects, the client then reuses the
call number for the next call (it doesn't know the server already saw
the call number), but the server thinks it already has the first
packet of this call (it doesn't know that the client doesn't know that
it saw the call number the first time).
Signed-off-by: David Howells <dhowells@redhat.com>
2016-06-27 13:39:44 +00:00
|
|
|
} channels[RXRPC_MAXCALLS];
|
2016-06-17 14:42:35 +00:00
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
struct work_struct processor; /* connection event processor */
|
2016-06-17 14:42:35 +00:00
|
|
|
union {
|
|
|
|
struct rb_node client_node; /* Node in local->client_conns */
|
2016-06-17 09:06:56 +00:00
|
|
|
struct rb_node service_node; /* Node in peer->service_conns */
|
2016-06-17 14:42:35 +00:00
|
|
|
};
|
2016-08-24 06:30:52 +00:00
|
|
|
struct list_head proc_link; /* link in procfs list */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct list_head link; /* link in master connection list */
|
|
|
|
struct sk_buff_head rx_queue; /* received conn-level packets */
|
2016-04-07 16:23:51 +00:00
|
|
|
const struct rxrpc_security *security; /* applied security module */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct key *server_key; /* security for this service */
|
2016-01-24 13:19:01 +00:00
|
|
|
struct crypto_skcipher *cipher; /* encryption handle */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct rxrpc_crypt csum_iv; /* packet checksum base */
|
2016-04-04 13:00:37 +00:00
|
|
|
unsigned long flags;
|
2007-04-26 22:48:28 +00:00
|
|
|
unsigned long events;
|
2016-08-23 14:27:24 +00:00
|
|
|
unsigned long idle_timestamp; /* Time at which last became idle */
|
2007-04-26 22:48:28 +00:00
|
|
|
spinlock_t state_lock; /* state-change lock */
|
2016-09-08 10:10:11 +00:00
|
|
|
enum rxrpc_conn_cache_state cache_state;
|
|
|
|
enum rxrpc_conn_proto_state state; /* current state of connection */
|
2016-04-07 16:23:30 +00:00
|
|
|
u32 local_abort; /* local abort code */
|
|
|
|
u32 remote_abort; /* remote abort code */
|
2007-04-26 22:48:28 +00:00
|
|
|
int debug_id; /* debug ID for printks */
|
|
|
|
atomic_t serial; /* packet serial number counter */
|
2016-08-23 14:27:25 +00:00
|
|
|
unsigned int hi_serial; /* highest serial number received */
|
2007-04-26 22:48:28 +00:00
|
|
|
u8 size_align; /* data size alignment (for security) */
|
|
|
|
u8 header_size; /* rxrpc + security header size */
|
|
|
|
u8 security_size; /* security header size */
|
|
|
|
u32 security_nonce; /* response re-use preventer */
|
|
|
|
u8 security_ix; /* security type */
|
|
|
|
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
|
|
|
|
};
|
|
|
|
|
2016-03-04 15:53:46 +00:00
|
|
|
/*
|
|
|
|
* Flags in call->flags.
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_flag {
|
|
|
|
RXRPC_CALL_RELEASED, /* call has been released - no more message to userspace */
|
|
|
|
RXRPC_CALL_TERMINAL_MSG, /* call has given the socket its final message */
|
|
|
|
RXRPC_CALL_RCVD_LAST, /* all packets received */
|
|
|
|
RXRPC_CALL_RUN_RTIMER, /* Tx resend timer started */
|
|
|
|
RXRPC_CALL_TX_SOFT_ACK, /* sent some soft ACKs */
|
|
|
|
RXRPC_CALL_INIT_ACCEPT, /* acceptance was initiated */
|
|
|
|
RXRPC_CALL_HAS_USERID, /* has a user ID attached */
|
|
|
|
RXRPC_CALL_EXPECT_OOS, /* expect out of sequence packets */
|
2016-08-23 14:27:24 +00:00
|
|
|
RXRPC_CALL_IS_SERVICE, /* Call is service call */
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 19:42:14 +00:00
|
|
|
RXRPC_CALL_RX_NO_MORE, /* Don't indicate MSG_MORE from recvmsg() */
|
2016-03-04 15:53:46 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Events that can be raised on a call.
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_event {
|
2016-03-04 15:53:46 +00:00
|
|
|
RXRPC_CALL_EV_RCVD_ACKALL, /* ACKALL or reply received */
|
|
|
|
RXRPC_CALL_EV_RCVD_BUSY, /* busy packet received */
|
|
|
|
RXRPC_CALL_EV_RCVD_ABORT, /* abort packet received */
|
|
|
|
RXRPC_CALL_EV_RCVD_ERROR, /* network error received */
|
|
|
|
RXRPC_CALL_EV_ACK_FINAL, /* need to generate final ACK (and release call) */
|
|
|
|
RXRPC_CALL_EV_ACK, /* need to generate ACK */
|
|
|
|
RXRPC_CALL_EV_REJECT_BUSY, /* need to generate busy message */
|
|
|
|
RXRPC_CALL_EV_ABORT, /* need to generate abort */
|
|
|
|
RXRPC_CALL_EV_CONN_ABORT, /* local connection abort generated */
|
|
|
|
RXRPC_CALL_EV_RESEND_TIMER, /* Tx resend timer expired */
|
|
|
|
RXRPC_CALL_EV_RESEND, /* Tx resend required */
|
|
|
|
RXRPC_CALL_EV_DRAIN_RX_OOS, /* drain the Rx out of sequence queue */
|
|
|
|
RXRPC_CALL_EV_LIFE_TIMER, /* call's lifetimer ran out */
|
|
|
|
RXRPC_CALL_EV_ACCEPTED, /* incoming call accepted by userspace app */
|
|
|
|
RXRPC_CALL_EV_SECURED, /* incoming call's connection is now secure */
|
|
|
|
RXRPC_CALL_EV_POST_ACCEPT, /* need to post an "accept?" message to the app */
|
2016-03-04 15:53:46 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The states that a call can be in.
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_state {
|
2016-06-17 14:42:35 +00:00
|
|
|
RXRPC_CALL_UNINITIALISED,
|
|
|
|
RXRPC_CALL_CLIENT_AWAIT_CONN, /* - client waiting for connection to become available */
|
2016-03-04 15:53:46 +00:00
|
|
|
RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
|
|
|
|
RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
|
|
|
|
RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
|
|
|
|
RXRPC_CALL_CLIENT_FINAL_ACK, /* - client sending final ACK phase */
|
|
|
|
RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
|
|
|
|
RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
|
|
|
|
RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
|
|
|
|
RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
|
|
|
|
RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
|
|
|
|
RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
|
2016-08-30 08:49:28 +00:00
|
|
|
RXRPC_CALL_COMPLETE, /* - call complete */
|
|
|
|
NR__RXRPC_CALL_STATES
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Call completion condition (state == RXRPC_CALL_COMPLETE).
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_completion {
|
|
|
|
RXRPC_CALL_SUCCEEDED, /* - Normal termination */
|
2016-03-04 15:53:46 +00:00
|
|
|
RXRPC_CALL_SERVER_BUSY, /* - call rejected by busy server */
|
|
|
|
RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
|
|
|
|
RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
|
2016-08-30 08:49:28 +00:00
|
|
|
RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
|
2016-03-04 15:53:46 +00:00
|
|
|
RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
|
2016-08-30 08:49:28 +00:00
|
|
|
NR__RXRPC_CALL_COMPLETIONS
|
2016-03-04 15:53:46 +00:00
|
|
|
};
|
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/*
|
|
|
|
* RxRPC call definition
|
|
|
|
* - matched by { connection, call_id }
|
|
|
|
*/
|
|
|
|
struct rxrpc_call {
|
2016-06-27 16:11:19 +00:00
|
|
|
struct rcu_head rcu;
|
2007-04-26 22:48:28 +00:00
|
|
|
struct rxrpc_connection *conn; /* connection carrying call */
|
2016-08-24 13:31:43 +00:00
|
|
|
struct rxrpc_peer *peer; /* Peer record for remote address */
|
2016-09-07 08:19:31 +00:00
|
|
|
struct rxrpc_sock __rcu *socket; /* socket responsible */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct timer_list lifetimer; /* lifetime remaining on call */
|
|
|
|
struct timer_list ack_timer; /* ACK generation timer */
|
|
|
|
struct timer_list resend_timer; /* Tx resend timer */
|
|
|
|
struct work_struct processor; /* packet processor and ACK generator */
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 19:42:14 +00:00
|
|
|
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct list_head link; /* link in master call list */
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
struct list_head chan_wait_link; /* Link in conn->waiting_calls */
|
2016-04-04 13:00:34 +00:00
|
|
|
struct hlist_node error_link; /* link in error distribution list */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct list_head accept_link; /* calls awaiting acceptance */
|
|
|
|
struct rb_node sock_node; /* node in socket call tree */
|
|
|
|
struct sk_buff_head rx_queue; /* received packets */
|
|
|
|
struct sk_buff_head rx_oos_queue; /* packets received out of sequence */
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 19:42:14 +00:00
|
|
|
struct sk_buff_head knlrecv_queue; /* Queue for kernel_recv [TODO: replace this] */
|
2007-04-26 22:48:28 +00:00
|
|
|
struct sk_buff *tx_pending; /* Tx socket buffer being filled */
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
wait_queue_head_t waitq; /* Wait queue for channel or Tx */
|
2016-06-26 21:55:24 +00:00
|
|
|
__be32 crypto_buf[2]; /* Temporary packet crypto buffer */
|
2007-04-26 22:48:28 +00:00
|
|
|
unsigned long user_call_ID; /* user-defined call ID */
|
|
|
|
unsigned long creation_jif; /* time of call creation */
|
|
|
|
unsigned long flags;
|
|
|
|
unsigned long events;
|
|
|
|
spinlock_t lock;
|
|
|
|
rwlock_t state_lock; /* lock for state transition */
|
2016-08-30 08:49:28 +00:00
|
|
|
u32 abort_code; /* Local/remote abort code */
|
|
|
|
int error; /* Local error incurred */
|
2016-09-08 10:10:11 +00:00
|
|
|
enum rxrpc_call_state state; /* current state of call */
|
|
|
|
enum rxrpc_call_completion completion; /* Call completion condition */
|
2007-04-26 22:48:28 +00:00
|
|
|
atomic_t usage;
|
|
|
|
atomic_t sequence; /* Tx data packet sequence counter */
|
2016-08-23 14:27:24 +00:00
|
|
|
u16 service_id; /* service ID */
|
2016-09-07 14:19:25 +00:00
|
|
|
u8 security_ix; /* Security type */
|
2016-08-23 14:27:24 +00:00
|
|
|
u32 call_id; /* call ID on connection */
|
|
|
|
u32 cid; /* connection ID plus channel index */
|
|
|
|
int debug_id; /* debug ID for printks */
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/* transmission-phase ACK management */
|
2009-09-16 07:01:13 +00:00
|
|
|
u8 acks_head; /* offset into window of first entry */
|
|
|
|
u8 acks_tail; /* offset into window of last entry */
|
|
|
|
u8 acks_winsz; /* size of un-ACK'd window */
|
|
|
|
u8 acks_unacked; /* lowest unacked packet in last ACK received */
|
2007-04-26 22:48:28 +00:00
|
|
|
int acks_latest; /* serial number of latest ACK received */
|
|
|
|
rxrpc_seq_t acks_hard; /* highest definitively ACK'd msg seq */
|
|
|
|
unsigned long *acks_window; /* sent packet window
|
|
|
|
* - elements are pointers with LSB set if ACK'd
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* receive-phase ACK management */
|
|
|
|
rxrpc_seq_t rx_data_expect; /* next data seq ID expected to be received */
|
|
|
|
rxrpc_seq_t rx_data_post; /* next data seq ID expected to be posted */
|
|
|
|
rxrpc_seq_t rx_data_recv; /* last data seq ID encountered by recvmsg */
|
|
|
|
rxrpc_seq_t rx_data_eaten; /* last data seq ID consumed by recvmsg */
|
|
|
|
rxrpc_seq_t rx_first_oos; /* first packet in rx_oos_queue (or 0) */
|
|
|
|
rxrpc_seq_t ackr_win_top; /* top of ACK window (rx_data_eaten is bottom) */
|
2016-03-04 15:53:46 +00:00
|
|
|
rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
|
2009-09-16 07:01:13 +00:00
|
|
|
u8 ackr_reason; /* reason to ACK */
|
2016-08-23 14:27:25 +00:00
|
|
|
u16 ackr_skew; /* skew on packet being ACK'd */
|
2016-03-04 15:53:46 +00:00
|
|
|
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
|
2007-04-26 22:48:28 +00:00
|
|
|
atomic_t ackr_not_idle; /* number of packets in Rx queue */
|
|
|
|
|
|
|
|
/* received packet records, 1 bit per record */
|
|
|
|
#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
|
|
|
|
unsigned long ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];
|
|
|
|
};
|
|
|
|
|
2016-09-07 13:34:21 +00:00
|
|
|
enum rxrpc_call_trace {
|
|
|
|
rxrpc_call_new_client,
|
|
|
|
rxrpc_call_new_service,
|
|
|
|
rxrpc_call_queued,
|
|
|
|
rxrpc_call_queued_ref,
|
|
|
|
rxrpc_call_seen,
|
|
|
|
rxrpc_call_got,
|
|
|
|
rxrpc_call_got_skb,
|
|
|
|
rxrpc_call_got_userid,
|
|
|
|
rxrpc_call_put,
|
|
|
|
rxrpc_call_put_skb,
|
|
|
|
rxrpc_call_put_userid,
|
|
|
|
rxrpc_call_put_noqueue,
|
|
|
|
rxrpc_call__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_call_traces[rxrpc_call__nr_trace][4];
|
|
|
|
|
2016-08-23 14:27:24 +00:00
|
|
|
#include <trace/events/rxrpc.h>
|
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/*
|
[AF_RXRPC]: Add an interface to the AF_RXRPC module for the AFS filesystem to use
Add an interface to the AF_RXRPC module so that the AFS filesystem module can
more easily make use of the services available. AFS still opens a socket but
then uses the action functions in lieu of sendmsg() and registers an intercept
functions to grab messages before they're queued on the socket Rx queue.
This permits AFS (or whatever) to:
(1) Avoid the overhead of using the recvmsg() call.
(2) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(3) Avoid calling request_key() at the point of issue of a call or opening of
a socket. This is done instead by AFS at the point of open(), unlink() or
other VFS operation and the key handed through.
(4) Request the use of something other than GFP_KERNEL to allocate memory.
Furthermore:
(*) The socket buffer markings used by RxRPC are made available for AFS so
that it can interpret the cooked RxRPC messages itself.
(*) rxgen (un)marshalling abort codes are made available.
The following documentation for the kernel interface is added to
Documentation/networking/rxrpc.txt:
=========================
AF_RXRPC KERNEL INTERFACE
=========================
The AF_RXRPC module also provides an interface for use by in-kernel utilities
such as the AFS filesystem. This permits such a utility to:
(1) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(2) Avoid having RxRPC call request_key() at the point of issue of a call or
opening of a socket. Instead the utility is responsible for requesting a
key at the appropriate point. AFS, for instance, would do this during VFS
operations such as open() or unlink(). The key is then handed through
when the call is initiated.
(3) Request the use of something other than GFP_KERNEL to allocate memory.
(4) Avoid the overhead of using the recvmsg() call. RxRPC messages can be
intercepted before they get put into the socket Rx queue and the socket
buffers manipulated directly.
To use the RxRPC facility, a kernel utility must still open an AF_RXRPC socket,
bind an addess as appropriate and listen if it's to be a server socket, but
then it passes this to the kernel interface functions.
The kernel interface functions are as follows:
(*) Begin a new client call.
struct rxrpc_call *
rxrpc_kernel_begin_call(struct socket *sock,
struct sockaddr_rxrpc *srx,
struct key *key,
unsigned long user_call_ID,
gfp_t gfp);
This allocates the infrastructure to make a new RxRPC call and assigns
call and connection numbers. The call will be made on the UDP port that
the socket is bound to. The call will go to the destination address of a
connected client socket unless an alternative is supplied (srx is
non-NULL).
If a key is supplied then this will be used to secure the call instead of
the key bound to the socket with the RXRPC_SECURITY_KEY sockopt. Calls
secured in this way will still share connections if at all possible.
The user_call_ID is equivalent to that supplied to sendmsg() in the
control data buffer. It is entirely feasible to use this to point to a
kernel data structure.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) End a client call.
void rxrpc_kernel_end_call(struct rxrpc_call *call);
This is used to end a previously begun call. The user_call_ID is expunged
from AF_RXRPC's knowledge and will not be seen again in association with
the specified call.
(*) Send data through a call.
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
size_t len);
This is used to supply either the request part of a client call or the
reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the
data buffers to be used. msg_iov may not be NULL and must point
exclusively to in-kernel virtual addresses. msg.msg_flags may be given
MSG_MORE if there will be subsequent data sends for this call.
The msg must not specify a destination address, control data or any flags
other than MSG_MORE. len is the total amount of data to transmit.
(*) Abort a call.
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code);
This is used to abort a call if it's still in an abortable state. The
abort code specified will be placed in the ABORT message sent.
(*) Intercept received RxRPC messages.
typedef void (*rxrpc_interceptor_t)(struct sock *sk,
unsigned long user_call_ID,
struct sk_buff *skb);
void
rxrpc_kernel_intercept_rx_messages(struct socket *sock,
rxrpc_interceptor_t interceptor);
This installs an interceptor function on the specified AF_RXRPC socket.
All messages that would otherwise wind up in the socket's Rx queue are
then diverted to this function. Note that care must be taken to process
the messages in the right order to maintain DATA message sequentiality.
The interceptor function itself is provided with the address of the socket
and handling the incoming message, the ID assigned by the kernel utility
to the call and the socket buffer containing the message.
The skb->mark field indicates the type of message:
MARK MEANING
=============================== =======================================
RXRPC_SKB_MARK_DATA Data message
RXRPC_SKB_MARK_FINAL_ACK Final ACK received for an incoming call
RXRPC_SKB_MARK_BUSY Client call rejected as server busy
RXRPC_SKB_MARK_REMOTE_ABORT Call aborted by peer
RXRPC_SKB_MARK_NET_ERROR Network error detected
RXRPC_SKB_MARK_LOCAL_ERROR Local error encountered
RXRPC_SKB_MARK_NEW_CALL New incoming call awaiting acceptance
The remote abort message can be probed with rxrpc_kernel_get_abort_code().
The two error messages can be probed with rxrpc_kernel_get_error_number().
A new call can be accepted with rxrpc_kernel_accept_call().
Data messages can have their contents extracted with the usual bunch of
socket buffer manipulation functions. A data message can be determined to
be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
data message has been used up, rxrpc_kernel_data_delivered() should be
called on it..
Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
of. It is possible to get extra refs on all types of message for later
freeing, but this may pin the state of a call until the message is finally
freed.
(*) Accept an incoming call.
struct rxrpc_call *
rxrpc_kernel_accept_call(struct socket *sock,
unsigned long user_call_ID);
This is used to accept an incoming call and to assign it a call ID. This
function is similar to rxrpc_kernel_begin_call() and calls accepted must
be ended in the same way.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) Reject an incoming call.
int rxrpc_kernel_reject_call(struct socket *sock);
This is used to reject the first incoming call on the socket's queue with
a BUSY message. -ENODATA is returned if there were no incoming calls.
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
(*) Record the delivery of a data message and free it.
void rxrpc_kernel_data_delivered(struct sk_buff *skb);
This is used to record a data message as having been delivered and to
update the ACK state for the call. The socket buffer will be freed.
(*) Free a message.
void rxrpc_kernel_free_skb(struct sk_buff *skb);
This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC
socket.
(*) Determine if a data message is the last one on a call.
bool rxrpc_kernel_is_data_last(struct sk_buff *skb);
This is used to determine if a socket buffer holds the last data message
to be received for a call (true will be returned if it does, false
if not).
The data message will be part of the reply on a client call and the
request on an incoming call. In the latter case there will be more
messages, but in the former case there will not.
(*) Get the abort code from an abort message.
u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb);
This is used to extract the abort code from a remote abort message.
(*) Get the error number from a local or network error message.
int rxrpc_kernel_get_error_number(struct sk_buff *skb);
This is used to extract the error number from a message indicating either
a local error occurred or a network error occurred.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-04-26 22:50:17 +00:00
|
|
|
* af_rxrpc.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
[AF_RXRPC]: Add an interface to the AF_RXRPC module for the AFS filesystem to use
Add an interface to the AF_RXRPC module so that the AFS filesystem module can
more easily make use of the services available. AFS still opens a socket but
then uses the action functions in lieu of sendmsg() and registers an intercept
functions to grab messages before they're queued on the socket Rx queue.
This permits AFS (or whatever) to:
(1) Avoid the overhead of using the recvmsg() call.
(2) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(3) Avoid calling request_key() at the point of issue of a call or opening of
a socket. This is done instead by AFS at the point of open(), unlink() or
other VFS operation and the key handed through.
(4) Request the use of something other than GFP_KERNEL to allocate memory.
Furthermore:
(*) The socket buffer markings used by RxRPC are made available for AFS so
that it can interpret the cooked RxRPC messages itself.
(*) rxgen (un)marshalling abort codes are made available.
The following documentation for the kernel interface is added to
Documentation/networking/rxrpc.txt:
=========================
AF_RXRPC KERNEL INTERFACE
=========================
The AF_RXRPC module also provides an interface for use by in-kernel utilities
such as the AFS filesystem. This permits such a utility to:
(1) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(2) Avoid having RxRPC call request_key() at the point of issue of a call or
opening of a socket. Instead the utility is responsible for requesting a
key at the appropriate point. AFS, for instance, would do this during VFS
operations such as open() or unlink(). The key is then handed through
when the call is initiated.
(3) Request the use of something other than GFP_KERNEL to allocate memory.
(4) Avoid the overhead of using the recvmsg() call. RxRPC messages can be
intercepted before they get put into the socket Rx queue and the socket
buffers manipulated directly.
To use the RxRPC facility, a kernel utility must still open an AF_RXRPC socket,
bind an addess as appropriate and listen if it's to be a server socket, but
then it passes this to the kernel interface functions.
The kernel interface functions are as follows:
(*) Begin a new client call.
struct rxrpc_call *
rxrpc_kernel_begin_call(struct socket *sock,
struct sockaddr_rxrpc *srx,
struct key *key,
unsigned long user_call_ID,
gfp_t gfp);
This allocates the infrastructure to make a new RxRPC call and assigns
call and connection numbers. The call will be made on the UDP port that
the socket is bound to. The call will go to the destination address of a
connected client socket unless an alternative is supplied (srx is
non-NULL).
If a key is supplied then this will be used to secure the call instead of
the key bound to the socket with the RXRPC_SECURITY_KEY sockopt. Calls
secured in this way will still share connections if at all possible.
The user_call_ID is equivalent to that supplied to sendmsg() in the
control data buffer. It is entirely feasible to use this to point to a
kernel data structure.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) End a client call.
void rxrpc_kernel_end_call(struct rxrpc_call *call);
This is used to end a previously begun call. The user_call_ID is expunged
from AF_RXRPC's knowledge and will not be seen again in association with
the specified call.
(*) Send data through a call.
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
size_t len);
This is used to supply either the request part of a client call or the
reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the
data buffers to be used. msg_iov may not be NULL and must point
exclusively to in-kernel virtual addresses. msg.msg_flags may be given
MSG_MORE if there will be subsequent data sends for this call.
The msg must not specify a destination address, control data or any flags
other than MSG_MORE. len is the total amount of data to transmit.
(*) Abort a call.
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code);
This is used to abort a call if it's still in an abortable state. The
abort code specified will be placed in the ABORT message sent.
(*) Intercept received RxRPC messages.
typedef void (*rxrpc_interceptor_t)(struct sock *sk,
unsigned long user_call_ID,
struct sk_buff *skb);
void
rxrpc_kernel_intercept_rx_messages(struct socket *sock,
rxrpc_interceptor_t interceptor);
This installs an interceptor function on the specified AF_RXRPC socket.
All messages that would otherwise wind up in the socket's Rx queue are
then diverted to this function. Note that care must be taken to process
the messages in the right order to maintain DATA message sequentiality.
The interceptor function itself is provided with the address of the socket
and handling the incoming message, the ID assigned by the kernel utility
to the call and the socket buffer containing the message.
The skb->mark field indicates the type of message:
MARK MEANING
=============================== =======================================
RXRPC_SKB_MARK_DATA Data message
RXRPC_SKB_MARK_FINAL_ACK Final ACK received for an incoming call
RXRPC_SKB_MARK_BUSY Client call rejected as server busy
RXRPC_SKB_MARK_REMOTE_ABORT Call aborted by peer
RXRPC_SKB_MARK_NET_ERROR Network error detected
RXRPC_SKB_MARK_LOCAL_ERROR Local error encountered
RXRPC_SKB_MARK_NEW_CALL New incoming call awaiting acceptance
The remote abort message can be probed with rxrpc_kernel_get_abort_code().
The two error messages can be probed with rxrpc_kernel_get_error_number().
A new call can be accepted with rxrpc_kernel_accept_call().
Data messages can have their contents extracted with the usual bunch of
socket buffer manipulation functions. A data message can be determined to
be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
data message has been used up, rxrpc_kernel_data_delivered() should be
called on it..
Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
of. It is possible to get extra refs on all types of message for later
freeing, but this may pin the state of a call until the message is finally
freed.
(*) Accept an incoming call.
struct rxrpc_call *
rxrpc_kernel_accept_call(struct socket *sock,
unsigned long user_call_ID);
This is used to accept an incoming call and to assign it a call ID. This
function is similar to rxrpc_kernel_begin_call() and calls accepted must
be ended in the same way.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) Reject an incoming call.
int rxrpc_kernel_reject_call(struct socket *sock);
This is used to reject the first incoming call on the socket's queue with
a BUSY message. -ENODATA is returned if there were no incoming calls.
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
(*) Record the delivery of a data message and free it.
void rxrpc_kernel_data_delivered(struct sk_buff *skb);
This is used to record a data message as having been delivered and to
update the ACK state for the call. The socket buffer will be freed.
(*) Free a message.
void rxrpc_kernel_free_skb(struct sk_buff *skb);
This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC
socket.
(*) Determine if a data message is the last one on a call.
bool rxrpc_kernel_is_data_last(struct sk_buff *skb);
This is used to determine if a socket buffer holds the last data message
to be received for a call (true will be returned if it does, false
if not).
The data message will be part of the reply on a client call and the
request on an incoming call. In the latter case there will be more
messages, but in the former case there will not.
(*) Get the abort code from an abort message.
u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb);
This is used to extract the abort code from a remote abort message.
(*) Get the error number from a local or network error message.
int rxrpc_kernel_get_error_number(struct sk_buff *skb);
This is used to extract the error number from a message indicating either
a local error occurred or a network error occurred.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-04-26 22:50:17 +00:00
|
|
|
extern atomic_t rxrpc_n_skbs;
|
2016-03-04 15:53:46 +00:00
|
|
|
extern u32 rxrpc_epoch;
|
[AF_RXRPC]: Add an interface to the AF_RXRPC module for the AFS filesystem to use
Add an interface to the AF_RXRPC module so that the AFS filesystem module can
more easily make use of the services available. AFS still opens a socket but
then uses the action functions in lieu of sendmsg() and registers an intercept
functions to grab messages before they're queued on the socket Rx queue.
This permits AFS (or whatever) to:
(1) Avoid the overhead of using the recvmsg() call.
(2) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(3) Avoid calling request_key() at the point of issue of a call or opening of
a socket. This is done instead by AFS at the point of open(), unlink() or
other VFS operation and the key handed through.
(4) Request the use of something other than GFP_KERNEL to allocate memory.
Furthermore:
(*) The socket buffer markings used by RxRPC are made available for AFS so
that it can interpret the cooked RxRPC messages itself.
(*) rxgen (un)marshalling abort codes are made available.
The following documentation for the kernel interface is added to
Documentation/networking/rxrpc.txt:
=========================
AF_RXRPC KERNEL INTERFACE
=========================
The AF_RXRPC module also provides an interface for use by in-kernel utilities
such as the AFS filesystem. This permits such a utility to:
(1) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(2) Avoid having RxRPC call request_key() at the point of issue of a call or
opening of a socket. Instead the utility is responsible for requesting a
key at the appropriate point. AFS, for instance, would do this during VFS
operations such as open() or unlink(). The key is then handed through
when the call is initiated.
(3) Request the use of something other than GFP_KERNEL to allocate memory.
(4) Avoid the overhead of using the recvmsg() call. RxRPC messages can be
intercepted before they get put into the socket Rx queue and the socket
buffers manipulated directly.
To use the RxRPC facility, a kernel utility must still open an AF_RXRPC socket,
bind an addess as appropriate and listen if it's to be a server socket, but
then it passes this to the kernel interface functions.
The kernel interface functions are as follows:
(*) Begin a new client call.
struct rxrpc_call *
rxrpc_kernel_begin_call(struct socket *sock,
struct sockaddr_rxrpc *srx,
struct key *key,
unsigned long user_call_ID,
gfp_t gfp);
This allocates the infrastructure to make a new RxRPC call and assigns
call and connection numbers. The call will be made on the UDP port that
the socket is bound to. The call will go to the destination address of a
connected client socket unless an alternative is supplied (srx is
non-NULL).
If a key is supplied then this will be used to secure the call instead of
the key bound to the socket with the RXRPC_SECURITY_KEY sockopt. Calls
secured in this way will still share connections if at all possible.
The user_call_ID is equivalent to that supplied to sendmsg() in the
control data buffer. It is entirely feasible to use this to point to a
kernel data structure.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) End a client call.
void rxrpc_kernel_end_call(struct rxrpc_call *call);
This is used to end a previously begun call. The user_call_ID is expunged
from AF_RXRPC's knowledge and will not be seen again in association with
the specified call.
(*) Send data through a call.
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
size_t len);
This is used to supply either the request part of a client call or the
reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the
data buffers to be used. msg_iov may not be NULL and must point
exclusively to in-kernel virtual addresses. msg.msg_flags may be given
MSG_MORE if there will be subsequent data sends for this call.
The msg must not specify a destination address, control data or any flags
other than MSG_MORE. len is the total amount of data to transmit.
(*) Abort a call.
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code);
This is used to abort a call if it's still in an abortable state. The
abort code specified will be placed in the ABORT message sent.
(*) Intercept received RxRPC messages.
typedef void (*rxrpc_interceptor_t)(struct sock *sk,
unsigned long user_call_ID,
struct sk_buff *skb);
void
rxrpc_kernel_intercept_rx_messages(struct socket *sock,
rxrpc_interceptor_t interceptor);
This installs an interceptor function on the specified AF_RXRPC socket.
All messages that would otherwise wind up in the socket's Rx queue are
then diverted to this function. Note that care must be taken to process
the messages in the right order to maintain DATA message sequentiality.
The interceptor function itself is provided with the address of the socket
and handling the incoming message, the ID assigned by the kernel utility
to the call and the socket buffer containing the message.
The skb->mark field indicates the type of message:
MARK MEANING
=============================== =======================================
RXRPC_SKB_MARK_DATA Data message
RXRPC_SKB_MARK_FINAL_ACK Final ACK received for an incoming call
RXRPC_SKB_MARK_BUSY Client call rejected as server busy
RXRPC_SKB_MARK_REMOTE_ABORT Call aborted by peer
RXRPC_SKB_MARK_NET_ERROR Network error detected
RXRPC_SKB_MARK_LOCAL_ERROR Local error encountered
RXRPC_SKB_MARK_NEW_CALL New incoming call awaiting acceptance
The remote abort message can be probed with rxrpc_kernel_get_abort_code().
The two error messages can be probed with rxrpc_kernel_get_error_number().
A new call can be accepted with rxrpc_kernel_accept_call().
Data messages can have their contents extracted with the usual bunch of
socket buffer manipulation functions. A data message can be determined to
be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
data message has been used up, rxrpc_kernel_data_delivered() should be
called on it..
Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
of. It is possible to get extra refs on all types of message for later
freeing, but this may pin the state of a call until the message is finally
freed.
(*) Accept an incoming call.
struct rxrpc_call *
rxrpc_kernel_accept_call(struct socket *sock,
unsigned long user_call_ID);
This is used to accept an incoming call and to assign it a call ID. This
function is similar to rxrpc_kernel_begin_call() and calls accepted must
be ended in the same way.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) Reject an incoming call.
int rxrpc_kernel_reject_call(struct socket *sock);
This is used to reject the first incoming call on the socket's queue with
a BUSY message. -ENODATA is returned if there were no incoming calls.
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
(*) Record the delivery of a data message and free it.
void rxrpc_kernel_data_delivered(struct sk_buff *skb);
This is used to record a data message as having been delivered and to
update the ACK state for the call. The socket buffer will be freed.
(*) Free a message.
void rxrpc_kernel_free_skb(struct sk_buff *skb);
This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC
socket.
(*) Determine if a data message is the last one on a call.
bool rxrpc_kernel_is_data_last(struct sk_buff *skb);
This is used to determine if a socket buffer holds the last data message
to be received for a call (true will be returned if it does, false
if not).
The data message will be part of the reply on a client call and the
request on an incoming call. In the latter case there will be more
messages, but in the former case there will not.
(*) Get the abort code from an abort message.
u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb);
This is used to extract the abort code from a remote abort message.
(*) Get the error number from a local or network error message.
int rxrpc_kernel_get_error_number(struct sk_buff *skb);
This is used to extract the error number from a message indicating either
a local error occurred or a network error occurred.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-04-26 22:50:17 +00:00
|
|
|
extern atomic_t rxrpc_debug_id;
|
|
|
|
extern struct workqueue_struct *rxrpc_workqueue;
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* call_accept.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-04-04 13:00:35 +00:00
|
|
|
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 19:42:14 +00:00
|
|
|
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
|
|
|
|
rxrpc_notify_rx_t);
|
2013-10-18 20:48:25 +00:00
|
|
|
int rxrpc_reject_call(struct rxrpc_sock *);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* call_event.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-08-23 14:27:25 +00:00
|
|
|
void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool);
|
|
|
|
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool);
|
2013-10-18 20:48:25 +00:00
|
|
|
void rxrpc_process_call(struct work_struct *);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* call_object.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-08-30 08:49:28 +00:00
|
|
|
extern const char *const rxrpc_call_states[];
|
|
|
|
extern const char *const rxrpc_call_completions[];
|
2016-03-09 23:22:56 +00:00
|
|
|
extern unsigned int rxrpc_max_call_lifetime;
|
2007-04-26 22:48:28 +00:00
|
|
|
extern struct kmem_cache *rxrpc_call_jar;
|
|
|
|
extern struct list_head rxrpc_calls;
|
|
|
|
extern rwlock_t rxrpc_call_lock;
|
|
|
|
|
2016-06-09 22:02:51 +00:00
|
|
|
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
|
|
|
|
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
|
2016-04-04 13:00:36 +00:00
|
|
|
struct rxrpc_conn_parameters *,
|
2016-06-17 14:42:35 +00:00
|
|
|
struct sockaddr_rxrpc *,
|
2016-06-09 22:02:51 +00:00
|
|
|
unsigned long, gfp_t);
|
2013-10-18 20:48:25 +00:00
|
|
|
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
|
|
|
|
struct rxrpc_connection *,
|
2016-06-16 12:31:07 +00:00
|
|
|
struct sk_buff *);
|
2016-09-07 08:19:31 +00:00
|
|
|
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
|
2013-10-18 20:48:25 +00:00
|
|
|
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
|
2016-09-07 08:19:31 +00:00
|
|
|
bool __rxrpc_queue_call(struct rxrpc_call *);
|
|
|
|
bool rxrpc_queue_call(struct rxrpc_call *);
|
2016-08-30 08:49:29 +00:00
|
|
|
void rxrpc_see_call(struct rxrpc_call *);
|
2016-09-07 13:34:21 +00:00
|
|
|
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
|
|
|
|
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
|
2016-08-30 08:49:29 +00:00
|
|
|
void rxrpc_get_call_for_skb(struct rxrpc_call *, struct sk_buff *);
|
|
|
|
void rxrpc_put_call_for_skb(struct rxrpc_call *, struct sk_buff *);
|
2013-10-18 20:48:25 +00:00
|
|
|
void __exit rxrpc_destroy_all_calls(void);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
2016-08-23 14:27:24 +00:00
|
|
|
static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
|
|
|
|
{
|
|
|
|
return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
|
|
|
|
{
|
|
|
|
return !rxrpc_is_service_call(call);
|
|
|
|
}
|
|
|
|
|
2016-08-30 08:49:28 +00:00
|
|
|
/*
|
|
|
|
* Transition a call to the complete state.
|
|
|
|
*/
|
|
|
|
static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
|
|
|
|
enum rxrpc_call_completion compl,
|
|
|
|
u32 abort_code,
|
|
|
|
int error)
|
|
|
|
{
|
|
|
|
if (call->state < RXRPC_CALL_COMPLETE) {
|
|
|
|
call->abort_code = abort_code;
|
|
|
|
call->error = error;
|
|
|
|
call->completion = compl,
|
|
|
|
call->state = RXRPC_CALL_COMPLETE;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
|
|
|
|
enum rxrpc_call_completion compl,
|
|
|
|
u32 abort_code,
|
|
|
|
int error)
|
|
|
|
{
|
2016-09-07 15:34:12 +00:00
|
|
|
bool ret;
|
2016-08-30 08:49:28 +00:00
|
|
|
|
|
|
|
write_lock_bh(&call->state_lock);
|
|
|
|
ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
|
|
|
|
write_unlock_bh(&call->state_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Record that a call successfully completed.
|
|
|
|
*/
|
2016-09-07 15:34:12 +00:00
|
|
|
static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
|
2016-08-30 08:49:28 +00:00
|
|
|
{
|
2016-09-07 15:34:12 +00:00
|
|
|
return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
|
2016-08-30 08:49:28 +00:00
|
|
|
}
|
|
|
|
|
2016-09-07 15:34:12 +00:00
|
|
|
static inline bool rxrpc_call_completed(struct rxrpc_call *call)
|
2016-08-30 08:49:28 +00:00
|
|
|
{
|
2016-09-07 15:34:12 +00:00
|
|
|
bool ret;
|
|
|
|
|
2016-08-30 08:49:28 +00:00
|
|
|
write_lock_bh(&call->state_lock);
|
2016-09-07 15:34:12 +00:00
|
|
|
ret = __rxrpc_call_completed(call);
|
2016-08-30 08:49:28 +00:00
|
|
|
write_unlock_bh(&call->state_lock);
|
2016-09-07 15:34:12 +00:00
|
|
|
return ret;
|
2016-08-30 08:49:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Record that a call is locally aborted.
|
|
|
|
*/
|
2016-09-06 21:19:51 +00:00
|
|
|
static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
|
|
|
|
rxrpc_seq_t seq,
|
2016-08-30 08:49:28 +00:00
|
|
|
u32 abort_code, int error)
|
|
|
|
{
|
2016-09-06 21:19:51 +00:00
|
|
|
trace_rxrpc_abort(why, call->cid, call->call_id, seq,
|
|
|
|
abort_code, error);
|
2016-08-30 08:49:28 +00:00
|
|
|
if (__rxrpc_set_call_completion(call,
|
|
|
|
RXRPC_CALL_LOCALLY_ABORTED,
|
|
|
|
abort_code, error)) {
|
|
|
|
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-09-06 21:19:51 +00:00
|
|
|
static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
|
|
|
|
rxrpc_seq_t seq, u32 abort_code, int error)
|
2016-08-30 08:49:28 +00:00
|
|
|
{
|
|
|
|
bool ret;
|
|
|
|
|
|
|
|
write_lock_bh(&call->state_lock);
|
2016-09-06 21:19:51 +00:00
|
|
|
ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
|
2016-08-30 08:49:28 +00:00
|
|
|
write_unlock_bh(&call->state_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-04-04 13:00:37 +00:00
|
|
|
/*
|
|
|
|
* conn_client.c
|
|
|
|
*/
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
extern unsigned int rxrpc_max_client_connections;
|
|
|
|
extern unsigned int rxrpc_reap_client_connections;
|
|
|
|
extern unsigned int rxrpc_conn_idle_client_expiry;
|
|
|
|
extern unsigned int rxrpc_conn_idle_client_fast_expiry;
|
2016-04-04 13:00:37 +00:00
|
|
|
extern struct idr rxrpc_client_conn_ids;
|
|
|
|
|
2016-06-27 09:32:02 +00:00
|
|
|
void rxrpc_destroy_client_conn_ids(void);
|
2016-04-04 13:00:40 +00:00
|
|
|
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
|
|
|
|
struct sockaddr_rxrpc *, gfp_t);
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
void rxrpc_expose_client_call(struct rxrpc_call *);
|
|
|
|
void rxrpc_disconnect_client_call(struct rxrpc_call *);
|
|
|
|
void rxrpc_put_client_conn(struct rxrpc_connection *);
|
|
|
|
void __exit rxrpc_destroy_all_client_connections(void);
|
2016-04-04 13:00:37 +00:00
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* conn_event.c
|
|
|
|
*/
|
|
|
|
void rxrpc_process_connection(struct work_struct *);
|
|
|
|
void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
|
2016-04-04 13:00:35 +00:00
|
|
|
void rxrpc_reject_packets(struct rxrpc_local *);
|
2016-06-13 12:30:30 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* conn_object.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-03-09 23:22:56 +00:00
|
|
|
extern unsigned int rxrpc_connection_expiry;
|
2007-04-26 22:48:28 +00:00
|
|
|
extern struct list_head rxrpc_connections;
|
2016-08-24 06:30:52 +00:00
|
|
|
extern struct list_head rxrpc_connection_proc_list;
|
2007-04-26 22:48:28 +00:00
|
|
|
extern rwlock_t rxrpc_connection_lock;
|
|
|
|
|
2016-07-01 06:51:50 +00:00
|
|
|
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
|
2016-04-04 13:00:40 +00:00
|
|
|
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
|
2016-07-01 06:51:50 +00:00
|
|
|
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
|
|
|
|
struct sk_buff *);
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
|
2016-06-17 14:42:35 +00:00
|
|
|
void rxrpc_disconnect_call(struct rxrpc_call *);
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
void rxrpc_kill_connection(struct rxrpc_connection *);
|
2016-08-23 14:27:24 +00:00
|
|
|
void __rxrpc_put_connection(struct rxrpc_connection *);
|
2013-10-18 20:48:25 +00:00
|
|
|
void __exit rxrpc_destroy_all_connections(void);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
2016-04-04 13:00:36 +00:00
|
|
|
static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
|
|
|
|
{
|
|
|
|
return conn->out_clientflag;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
|
|
|
|
{
|
2016-06-30 11:16:21 +00:00
|
|
|
return !rxrpc_conn_is_client(conn);
|
2016-04-04 13:00:36 +00:00
|
|
|
}
|
|
|
|
|
2016-04-04 13:00:38 +00:00
|
|
|
static inline void rxrpc_get_connection(struct rxrpc_connection *conn)
|
|
|
|
{
|
|
|
|
atomic_inc(&conn->usage);
|
|
|
|
}
|
|
|
|
|
2016-06-27 09:32:03 +00:00
|
|
|
static inline
|
|
|
|
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
|
|
|
|
{
|
|
|
|
return atomic_inc_not_zero(&conn->usage) ? conn : NULL;
|
|
|
|
}
|
2016-06-27 09:32:02 +00:00
|
|
|
|
2016-08-23 14:27:24 +00:00
|
|
|
static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
|
|
|
|
{
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 06:30:52 +00:00
|
|
|
if (!conn)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (rxrpc_conn_is_client(conn)) {
|
|
|
|
if (atomic_dec_and_test(&conn->usage))
|
|
|
|
rxrpc_put_client_conn(conn);
|
|
|
|
} else {
|
|
|
|
if (atomic_dec_return(&conn->usage) == 1)
|
|
|
|
__rxrpc_put_connection(conn);
|
|
|
|
}
|
2016-08-23 14:27:24 +00:00
|
|
|
}
|
|
|
|
|
2016-07-01 06:51:50 +00:00
|
|
|
static inline bool rxrpc_queue_conn(struct rxrpc_connection *conn)
|
2016-06-27 09:32:02 +00:00
|
|
|
{
|
2016-07-01 06:51:50 +00:00
|
|
|
if (!rxrpc_get_connection_maybe(conn))
|
|
|
|
return false;
|
|
|
|
if (!rxrpc_queue_work(&conn->processor))
|
2016-06-27 09:32:03 +00:00
|
|
|
rxrpc_put_connection(conn);
|
2016-07-01 06:51:50 +00:00
|
|
|
return true;
|
2016-06-27 09:32:02 +00:00
|
|
|
}
|
|
|
|
|
2016-04-04 13:00:40 +00:00
|
|
|
/*
|
|
|
|
* conn_service.c
|
|
|
|
*/
|
2016-07-01 06:51:50 +00:00
|
|
|
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
|
|
|
|
struct sk_buff *);
|
2016-04-04 13:00:40 +00:00
|
|
|
struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *,
|
2016-06-29 13:40:39 +00:00
|
|
|
struct sockaddr_rxrpc *,
|
2016-04-04 13:00:40 +00:00
|
|
|
struct sk_buff *);
|
2016-06-30 09:45:22 +00:00
|
|
|
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
|
2016-04-04 13:00:40 +00:00
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* input.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-06-13 12:30:30 +00:00
|
|
|
void rxrpc_data_ready(struct sock *);
|
|
|
|
int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
|
|
|
|
void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* insecure.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-06-13 12:30:30 +00:00
|
|
|
extern const struct rxrpc_security rxrpc_no_security;
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* key.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-06-13 12:30:30 +00:00
|
|
|
extern struct key_type key_type_rxrpc;
|
|
|
|
extern struct key_type key_type_rxrpc_s;
|
|
|
|
|
|
|
|
int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
|
|
|
|
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
|
|
|
|
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
|
|
|
|
u32);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
2016-04-04 13:00:34 +00:00
|
|
|
/*
|
|
|
|
* local_event.c
|
|
|
|
*/
|
2016-04-04 13:00:35 +00:00
|
|
|
extern void rxrpc_process_local_events(struct rxrpc_local *);
|
2016-04-04 13:00:34 +00:00
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* local_object.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-04-04 13:00:35 +00:00
|
|
|
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
|
|
|
|
void __rxrpc_put_local(struct rxrpc_local *);
|
2013-10-18 20:48:25 +00:00
|
|
|
void __exit rxrpc_destroy_all_locals(void);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
2016-04-04 13:00:35 +00:00
|
|
|
static inline void rxrpc_get_local(struct rxrpc_local *local)
|
|
|
|
{
|
|
|
|
atomic_inc(&local->usage);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline
|
|
|
|
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
|
|
|
|
{
|
|
|
|
return atomic_inc_not_zero(&local->usage) ? local : NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void rxrpc_put_local(struct rxrpc_local *local)
|
|
|
|
{
|
2016-04-04 13:00:38 +00:00
|
|
|
if (local && atomic_dec_and_test(&local->usage))
|
2016-04-04 13:00:35 +00:00
|
|
|
__rxrpc_put_local(local);
|
|
|
|
}
|
|
|
|
|
2016-06-27 09:32:02 +00:00
|
|
|
static inline void rxrpc_queue_local(struct rxrpc_local *local)
|
|
|
|
{
|
|
|
|
rxrpc_queue_work(&local->processor);
|
|
|
|
}
|
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* misc.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-06-13 12:30:30 +00:00
|
|
|
extern unsigned int rxrpc_max_backlog __read_mostly;
|
|
|
|
extern unsigned int rxrpc_requested_ack_delay;
|
|
|
|
extern unsigned int rxrpc_soft_ack_delay;
|
|
|
|
extern unsigned int rxrpc_idle_ack_delay;
|
|
|
|
extern unsigned int rxrpc_rx_window_size;
|
|
|
|
extern unsigned int rxrpc_rx_mtu;
|
|
|
|
extern unsigned int rxrpc_rx_jumbo_max;
|
2016-09-02 21:39:45 +00:00
|
|
|
extern unsigned int rxrpc_resend_timeout;
|
2007-04-26 22:48:28 +00:00
|
|
|
|
2016-06-13 12:30:30 +00:00
|
|
|
extern const char *const rxrpc_pkts[];
|
|
|
|
extern const s8 rxrpc_ack_priority[];
|
|
|
|
|
|
|
|
extern const char *rxrpc_acks(u8 reason);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* output.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-09-07 08:19:31 +00:00
|
|
|
int rxrpc_send_call_packet(struct rxrpc_call *, u8);
|
2016-06-17 10:53:37 +00:00
|
|
|
int rxrpc_send_data_packet(struct rxrpc_connection *, struct sk_buff *);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-04-04 13:00:32 +00:00
|
|
|
* peer_event.c
|
2016-06-13 12:30:30 +00:00
|
|
|
*/
|
2016-04-04 13:00:32 +00:00
|
|
|
void rxrpc_error_report(struct sock *);
|
2016-04-04 13:00:34 +00:00
|
|
|
void rxrpc_peer_error_distributor(struct work_struct *);
|
2016-06-13 12:30:30 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* peer_object.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-04-04 13:00:32 +00:00
|
|
|
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
|
|
|
|
const struct sockaddr_rxrpc *);
|
|
|
|
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
|
|
|
|
struct sockaddr_rxrpc *, gfp_t);
|
|
|
|
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
|
|
|
|
|
2016-08-24 13:31:43 +00:00
|
|
|
static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
|
2016-04-04 13:00:32 +00:00
|
|
|
{
|
|
|
|
atomic_inc(&peer->usage);
|
2016-08-24 13:31:43 +00:00
|
|
|
return peer;
|
2016-04-04 13:00:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline
|
|
|
|
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
|
|
|
|
{
|
|
|
|
return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
|
|
|
|
static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
|
|
|
|
{
|
2016-04-04 13:00:38 +00:00
|
|
|
if (peer && atomic_dec_and_test(&peer->usage))
|
2016-04-04 13:00:32 +00:00
|
|
|
__rxrpc_put_peer(peer);
|
|
|
|
}
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* proc.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2008-01-31 02:55:45 +00:00
|
|
|
extern const struct file_operations rxrpc_call_seq_fops;
|
|
|
|
extern const struct file_operations rxrpc_connection_seq_fops;
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* recvmsg.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2015-03-02 07:37:48 +00:00
|
|
|
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* rxkad.c
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_RXKAD
|
|
|
|
extern const struct rxrpc_security rxkad;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* security.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
2016-04-07 16:23:51 +00:00
|
|
|
int __init rxrpc_init_security(void);
|
|
|
|
void rxrpc_exit_security(void);
|
2013-10-18 20:48:25 +00:00
|
|
|
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
|
|
|
|
int rxrpc_init_server_conn_security(struct rxrpc_connection *);
|
2016-09-07 13:43:39 +00:00
|
|
|
|
2016-09-02 21:39:45 +00:00
|
|
|
/*
|
|
|
|
* sendmsg.c
|
|
|
|
*/
|
|
|
|
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
/*
|
2016-06-13 12:30:30 +00:00
|
|
|
* skbuff.c
|
2007-04-26 22:48:28 +00:00
|
|
|
*/
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 19:42:14 +00:00
|
|
|
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
|
2013-10-18 20:48:25 +00:00
|
|
|
void rxrpc_packet_destructor(struct sk_buff *);
|
2016-08-23 14:27:24 +00:00
|
|
|
void rxrpc_new_skb(struct sk_buff *);
|
|
|
|
void rxrpc_see_skb(struct sk_buff *);
|
|
|
|
void rxrpc_get_skb(struct sk_buff *);
|
|
|
|
void rxrpc_free_skb(struct sk_buff *);
|
|
|
|
void rxrpc_purge_queue(struct sk_buff_head *);
|
2007-04-26 22:48:28 +00:00
|
|
|
|
2014-02-07 18:58:44 +00:00
|
|
|
/*
|
|
|
|
* sysctl.c
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_SYSCTL
|
|
|
|
extern int __init rxrpc_sysctl_init(void);
|
|
|
|
extern void rxrpc_sysctl_exit(void);
|
|
|
|
#else
|
|
|
|
static inline int __init rxrpc_sysctl_init(void) { return 0; }
|
|
|
|
static inline void rxrpc_sysctl_exit(void) {}
|
|
|
|
#endif
|
|
|
|
|
2016-04-04 13:00:32 +00:00
|
|
|
/*
|
|
|
|
* utils.c
|
|
|
|
*/
|
2016-06-29 13:40:39 +00:00
|
|
|
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
|
2016-04-04 13:00:32 +00:00
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
/*
|
|
|
|
* debug tracing
|
|
|
|
*/
|
2012-04-15 05:58:06 +00:00
|
|
|
extern unsigned int rxrpc_debug;
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
#define dbgprintk(FMT,...) \
|
2008-04-03 09:45:30 +00:00
|
|
|
printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
|
2007-04-26 22:48:28 +00:00
|
|
|
|
2008-03-06 04:47:47 +00:00
|
|
|
#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
|
|
|
|
#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
|
2007-04-26 22:48:28 +00:00
|
|
|
#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__)
|
|
|
|
#define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__)
|
|
|
|
#define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__)
|
|
|
|
|
|
|
|
|
|
|
|
#if defined(__KDEBUG)
|
|
|
|
#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
|
|
|
|
#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
|
|
|
|
#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
|
|
|
|
#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
|
|
|
|
#define _net(FMT,...) knet(FMT,##__VA_ARGS__)
|
|
|
|
|
|
|
|
#elif defined(CONFIG_AF_RXRPC_DEBUG)
|
|
|
|
#define RXRPC_DEBUG_KENTER 0x01
|
|
|
|
#define RXRPC_DEBUG_KLEAVE 0x02
|
|
|
|
#define RXRPC_DEBUG_KDEBUG 0x04
|
|
|
|
#define RXRPC_DEBUG_KPROTO 0x08
|
|
|
|
#define RXRPC_DEBUG_KNET 0x10
|
|
|
|
|
|
|
|
#define _enter(FMT,...) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER)) \
|
|
|
|
kenter(FMT,##__VA_ARGS__); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define _leave(FMT,...) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE)) \
|
|
|
|
kleave(FMT,##__VA_ARGS__); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define _debug(FMT,...) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG)) \
|
|
|
|
kdebug(FMT,##__VA_ARGS__); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define _proto(FMT,...) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO)) \
|
|
|
|
kproto(FMT,##__VA_ARGS__); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define _net(FMT,...) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET)) \
|
|
|
|
knet(FMT,##__VA_ARGS__); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#else
|
2010-08-12 15:54:57 +00:00
|
|
|
#define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
|
|
|
|
#define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
|
|
|
|
#define _debug(FMT,...) no_printk(" "FMT ,##__VA_ARGS__)
|
|
|
|
#define _proto(FMT,...) no_printk("### "FMT ,##__VA_ARGS__)
|
|
|
|
#define _net(FMT,...) no_printk("@@@ "FMT ,##__VA_ARGS__)
|
2007-04-26 22:48:28 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* debug assertion checking
|
|
|
|
*/
|
|
|
|
#if 1 // defined(__KDEBUGALL)
|
|
|
|
|
|
|
|
#define ASSERT(X) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(!(X))) { \
|
2016-06-02 19:08:52 +00:00
|
|
|
pr_err("Assertion failed\n"); \
|
2007-04-26 22:48:28 +00:00
|
|
|
BUG(); \
|
|
|
|
} \
|
2016-03-04 15:56:19 +00:00
|
|
|
} while (0)
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
#define ASSERTCMP(X, OP, Y) \
|
|
|
|
do { \
|
2016-09-08 10:10:11 +00:00
|
|
|
__typeof__(X) _x = (X); \
|
|
|
|
__typeof__(Y) _y = (__typeof__(X))(Y); \
|
2016-06-02 19:08:52 +00:00
|
|
|
if (unlikely(!(_x OP _y))) { \
|
2016-09-08 10:10:11 +00:00
|
|
|
pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
|
|
|
|
(unsigned long)_x, (unsigned long)_x, #OP, \
|
|
|
|
(unsigned long)_y, (unsigned long)_y); \
|
2007-04-26 22:48:28 +00:00
|
|
|
BUG(); \
|
|
|
|
} \
|
2016-03-04 15:56:19 +00:00
|
|
|
} while (0)
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
#define ASSERTIF(C, X) \
|
|
|
|
do { \
|
|
|
|
if (unlikely((C) && !(X))) { \
|
2016-06-02 19:08:52 +00:00
|
|
|
pr_err("Assertion failed\n"); \
|
2007-04-26 22:48:28 +00:00
|
|
|
BUG(); \
|
|
|
|
} \
|
2016-03-04 15:56:19 +00:00
|
|
|
} while (0)
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
#define ASSERTIFCMP(C, X, OP, Y) \
|
|
|
|
do { \
|
2016-09-08 10:10:11 +00:00
|
|
|
__typeof__(X) _x = (X); \
|
|
|
|
__typeof__(Y) _y = (__typeof__(X))(Y); \
|
2016-06-02 19:08:52 +00:00
|
|
|
if (unlikely((C) && !(_x OP _y))) { \
|
|
|
|
pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
|
2016-09-08 10:10:11 +00:00
|
|
|
(unsigned long)_x, (unsigned long)_x, #OP, \
|
|
|
|
(unsigned long)_y, (unsigned long)_y); \
|
2007-04-26 22:48:28 +00:00
|
|
|
BUG(); \
|
|
|
|
} \
|
2016-03-04 15:56:19 +00:00
|
|
|
} while (0)
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
#define ASSERT(X) \
|
|
|
|
do { \
|
2016-03-04 15:56:19 +00:00
|
|
|
} while (0)
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
#define ASSERTCMP(X, OP, Y) \
|
|
|
|
do { \
|
2016-03-04 15:56:19 +00:00
|
|
|
} while (0)
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
#define ASSERTIF(C, X) \
|
|
|
|
do { \
|
2016-03-04 15:56:19 +00:00
|
|
|
} while (0)
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
#define ASSERTIFCMP(C, X, OP, Y) \
|
|
|
|
do { \
|
2016-03-04 15:56:19 +00:00
|
|
|
} while (0)
|
2007-04-26 22:48:28 +00:00
|
|
|
|
|
|
|
#endif /* __KDEBUGALL */
|