mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
tcp: let tcp_send_syn_data() build headless packets
tcp_send_syn_data() is the last component in the TCP transmit path to put payload in skb->head. Switch it to use page frags, so that we can remove dead code later. This allows putting more payload than the previous implementation. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
f2f069da4c
commit
fbf934068f
@ -333,6 +333,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
|
||||
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
|
||||
size_t size, int flags);
|
||||
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
|
||||
int tcp_wmem_schedule(struct sock *sk, int copy);
|
||||
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
|
||||
int size_goal);
|
||||
void tcp_release_cb(struct sock *sk);
|
||||
|
@ -957,7 +957,7 @@ static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
|
||||
}
|
||||
|
||||
|
||||
static int tcp_wmem_schedule(struct sock *sk, int copy)
|
||||
int tcp_wmem_schedule(struct sock *sk, int copy)
|
||||
{
|
||||
int left;
|
||||
|
||||
|
@ -3802,8 +3802,9 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
|
||||
struct inet_connection_sock *icsk = inet_csk(sk);
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct tcp_fastopen_request *fo = tp->fastopen_req;
|
||||
int space, err = 0;
|
||||
struct page_frag *pfrag = sk_page_frag(sk);
|
||||
struct sk_buff *syn_data;
|
||||
int space, err = 0;
|
||||
|
||||
tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
|
||||
if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
|
||||
@ -3822,25 +3823,31 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
|
||||
|
||||
space = min_t(size_t, space, fo->size);
|
||||
|
||||
/* limit to order-0 allocations */
|
||||
space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
|
||||
|
||||
syn_data = tcp_stream_alloc_skb(sk, space, sk->sk_allocation, false);
|
||||
if (space &&
|
||||
!skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE),
|
||||
pfrag, sk->sk_allocation))
|
||||
goto fallback;
|
||||
syn_data = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, false);
|
||||
if (!syn_data)
|
||||
goto fallback;
|
||||
memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
|
||||
if (space) {
|
||||
int copied = copy_from_iter(skb_put(syn_data, space), space,
|
||||
&fo->data->msg_iter);
|
||||
if (unlikely(!copied)) {
|
||||
space = min_t(size_t, space, pfrag->size - pfrag->offset);
|
||||
space = tcp_wmem_schedule(sk, space);
|
||||
}
|
||||
if (space) {
|
||||
space = copy_page_from_iter(pfrag->page, pfrag->offset,
|
||||
space, &fo->data->msg_iter);
|
||||
if (unlikely(!space)) {
|
||||
tcp_skb_tsorted_anchor_cleanup(syn_data);
|
||||
kfree_skb(syn_data);
|
||||
goto fallback;
|
||||
}
|
||||
if (copied != space) {
|
||||
skb_trim(syn_data, copied);
|
||||
space = copied;
|
||||
}
|
||||
skb_fill_page_desc(syn_data, 0, pfrag->page,
|
||||
pfrag->offset, space);
|
||||
page_ref_inc(pfrag->page);
|
||||
pfrag->offset += space;
|
||||
skb_len_add(syn_data, space);
|
||||
skb_zcopy_set(syn_data, fo->uarg, NULL);
|
||||
}
|
||||
/* No more data pending in inet_wait_for_connect() */
|
||||
|
Loading…
Reference in New Issue
Block a user