linux/net/rxrpc/skbuff.c
David Howells d001648ec7 rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide one notification hook that indicates that a call needs
attention and another that indicates that there's a new call to be
collected.

This makes the following possibilities more achievable:

 (1) Call refcounting can be made simpler if skbs don't hold refs to calls.

 (2) skbs referring to non-data events will be able to be freed much sooner,
     rather than being queued for AFS to pick up, because
     rxrpc_kernel_recv_data() will be able to consult the call state instead.

 (3) We can shortcut the receive phase when a call is remotely aborted
     because we don't have to go through all the packets to get to the one
     cancelling the operation.

 (4) It makes it easier to do encryption/decryption directly between AFS's
     buffers and sk_buffs.

 (5) Encryption/decryption can more easily be done in AFS's thread contexts
     - usually that of the userspace process that issued a syscall - rather
     than in one of rxrpc's background threads on a workqueue.

 (6) AFS will be able to wait synchronously on a call inside AF_RXRPC.

To make this work, the following interface function has been added:

     int rxrpc_kernel_recv_data(
		struct socket *sock, struct rxrpc_call *call,
		void *buffer, size_t bufsize, size_t *_offset,
		bool want_more, u32 *_abort_code);

This is the recvmsg equivalent.  It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
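
For illustration only, an in-kernel user might drive this in two steps,
first pulling a length word and then the payload, much as afs_extract_data()
does.  This is a hedged sketch, not part of the patch: the variables sock,
call and buf are assumed to be in scope (buf is invented), and the return
conventions described in the comment are assumptions about the completed
interface rather than guarantees of this patch:

	/* Hypothetical sketch.  Assumed returns: 0 = buffer filled and more
	 * data expected, 1 = buffer filled and receive phase complete,
	 * -EAGAIN = call still in progress, -ECONNABORTED = remote abort
	 * (code placed in *_abort_code).
	 */
	__be32 len;
	size_t offset = 0;
	u32 abort_code = 0;
	int ret;

	ret = rxrpc_kernel_recv_data(sock, call, &len, sizeof(len),
				     &offset, true, &abort_code);
	if (ret == 0) {
		offset = 0;
		ret = rxrpc_kernel_recv_data(sock, call, buf, ntohl(len),
					     &offset, false, &abort_code);
	}
	if (ret == -ECONNABORTED)
		pr_warn("Call aborted, code %u\n", abort_code);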

afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them.  They don't wait synchronously yet because the socket
lock needs to be dealt with.

Five interface functions have been removed:

	rxrpc_kernel_is_data_last()
	rxrpc_kernel_get_abort_code()
	rxrpc_kernel_get_error_number()
	rxrpc_kernel_free_skb()
	rxrpc_kernel_data_consumed()

As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user.  To process the queue internally, a temporary function,
temp_deliver_data(), has been added.  This will be replaced with common code
between the rxrpc_recvmsg() path and the rxrpc_kernel_recv_data() path in a
future patch.
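
The real temp_deliver_data() is not reproduced here; purely as a hedged
illustration of the queue-drain mechanism described above, it might take
roughly this shape (the function name, the copy step and the absence of
locking are all invented for the sketch):

	/* Hypothetical sketch only - not the real temp_deliver_data(). */
	static void knlrecv_queue_drain_sketch(struct rxrpc_call *call,
					       void *buf, size_t size)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&call->knlrecv_queue)) != NULL) {
			size_t n = min_t(size_t, size, skb->len);

			/* Copy what fits, then release the skb. */
			skb_copy_bits(skb, 0, buf, n);
			rxrpc_free_skb(skb);
			buf += n;
			size -= n;
		}
	}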

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-09-01 16:43:27 -07:00

/* ar-skbuff.c: socket buffer destruction handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * set up for the ACK at the end of the receive phase when we discard the final
 * receive phase data packet
 * - called with softirqs disabled
 */
static void rxrpc_request_final_ACK(struct rxrpc_call *call)
{
	/* the call may be aborted before we have a chance to ACK it */
	write_lock(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
		call->state = RXRPC_CALL_CLIENT_FINAL_ACK;
		_debug("request final ACK");

		/* get an extra ref on the call for the final-ACK generator to
		 * release */
		rxrpc_get_call(call);
		set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
		if (try_to_del_timer_sync(&call->ack_timer) >= 0)
			rxrpc_queue_call(call);
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST:
		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
	default:
		break;
	}

	write_unlock(&call->state_lock);
}

/*
 * drop the bottom ACK off of the call ACK window and advance the window
 */
static void rxrpc_hard_ACK_data(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	int loop;
	u32 seq;

	spin_lock_bh(&call->lock);

	_debug("hard ACK #%u", sp->hdr.seq);

	for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
		call->ackr_window[loop] >>= 1;
		call->ackr_window[loop] |=
			call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
	}

	seq = sp->hdr.seq;
	ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
	call->rx_data_eaten = seq;

	if (call->ackr_win_top < UINT_MAX)
		call->ackr_win_top++;

	ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
		    call->rx_data_post, >=, call->rx_data_recv);
	ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
		    call->rx_data_recv, >=, call->rx_data_eaten);

	if (sp->hdr.flags & RXRPC_LAST_PACKET) {
		rxrpc_request_final_ACK(call);
	} else if (atomic_dec_and_test(&call->ackr_not_idle) &&
		   test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
		/* We previously soft-ACK'd some received packets that have now
		 * been consumed, so send a hard-ACK if no more packets are
		 * immediately forthcoming to allow the transmitter to free up
		 * its Tx bufferage.
		 */
		_debug("send Rx idle ACK");
		__rxrpc_propose_ACK(call, RXRPC_ACK_IDLE,
				    skb->priority, sp->hdr.serial, false);
	}

	spin_unlock_bh(&call->lock);
}

/**
 * rxrpc_kernel_data_consumed - Record consumption of data message
 * @call: The call to which the message pertains.
 * @skb: Message holding data
 *
 * Record the consumption of a data message and generate an ACK if appropriate.
 * The call state is shifted if this was the final packet.  The caller must be
 * in process context with no spinlocks held.
 *
 * TODO: Actually generate the ACK here rather than punting this to the
 * workqueue.
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq);

	ASSERTCMP(sp->call, ==, call);
	ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA);

	/* TODO: Fix the sequence number tracking */
	ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
	ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
	ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);

	call->rx_data_recv = sp->hdr.seq;
	rxrpc_hard_ACK_data(call, skb);
}

/*
 * Destroy a packet that has an RxRPC control buffer
 */
void rxrpc_packet_destructor(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call = sp->call;

	_enter("%p{%p}", skb, call);

	if (call) {
		rxrpc_put_call_for_skb(call, skb);
		sp->call = NULL;
	}

	if (skb->sk)
		sock_rfree(skb);
	_leave("");
}

/**
 * rxrpc_kernel_free_skb - Free an RxRPC socket buffer
 * @skb: The socket buffer to be freed
 *
 * Let RxRPC free its own socket buffer, permitting it to maintain debug
 * accounting.
 */
void rxrpc_kernel_free_skb(struct sk_buff *skb)
{
	rxrpc_free_skb(skb);
}
EXPORT_SYMBOL(rxrpc_kernel_free_skb);

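/* Each of the skb-tracking helpers below passes a numeric operation code as
 * the second argument to trace_rxrpc_skb() to distinguish the event being
 * traced: 0 = new, 1 = seen, 2 = got, 3 = freed, 4 = purged.
 */
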
/*
 * Note the existence of a new-to-us socket buffer (allocated or dequeued).
 */
void rxrpc_new_skb(struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&rxrpc_n_skbs);

	trace_rxrpc_skb(skb, 0, atomic_read(&skb->users), n, here);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);

	if (skb) {
		int n = atomic_read(&rxrpc_n_skbs);
		trace_rxrpc_skb(skb, 1, atomic_read(&skb->users), n, here);
	}
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&rxrpc_n_skbs);

	trace_rxrpc_skb(skb, 2, atomic_read(&skb->users), n, here);
	skb_get(skb);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);

	if (skb) {
		int n;

		CHECK_SLAB_OKAY(&skb->users);
		n = atomic_dec_return(&rxrpc_n_skbs);
		trace_rxrpc_skb(skb, 3, atomic_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
	const void *here = __builtin_return_address(0);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		int n = atomic_dec_return(&rxrpc_n_skbs);
		trace_rxrpc_skb(skb, 4, atomic_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}