8265792bf8
sk_add_backlog() callers usually read sk->sk_rcvbuf without owning the
socket lock. This means sk_rcvbuf value can be changed by other cpus, and
KCSAN complains.

Add READ_ONCE() annotations to document the lockless nature of these reads.

Note that writes over sk_rcvbuf should also use WRITE_ONCE(), but this will
be done in separate patches to ease stable backports (if we decide this is
relevant for stable trees).

BUG: KCSAN: data-race in tcp_add_backlog / tcp_recvmsg

write to 0xffff88812ab369f8 of 8 bytes by interrupt on cpu 1:
 __sk_add_backlog include/net/sock.h:902 [inline]
 sk_add_backlog include/net/sock.h:933 [inline]
 tcp_add_backlog+0x45a/0xcc0 net/ipv4/tcp_ipv4.c:1737
 tcp_v4_rcv+0x1aba/0x1bf0 net/ipv4/tcp_ipv4.c:1925
 ip_protocol_deliver_rcu+0x51/0x470 net/ipv4/ip_input.c:204
 ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231
 NF_HOOK include/linux/netfilter.h:305 [inline]
 NF_HOOK include/linux/netfilter.h:299 [inline]
 ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252
 dst_input include/net/dst.h:442 [inline]
 ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413
 NF_HOOK include/linux/netfilter.h:305 [inline]
 NF_HOOK include/linux/netfilter.h:299 [inline]
 ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523
 __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5004
 __netif_receive_skb+0x37/0xf0 net/core/dev.c:5118
 netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5208
 napi_skb_finish net/core/dev.c:5671 [inline]
 napi_gro_receive+0x28f/0x330 net/core/dev.c:5704
 receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061
 virtnet_receive drivers/net/virtio_net.c:1323 [inline]
 virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428
 napi_poll net/core/dev.c:6352 [inline]
 net_rx_action+0x3ae/0xa50 net/core/dev.c:6418

read to 0xffff88812ab369f8 of 8 bytes by task 7271 on cpu 0:
 tcp_recvmsg+0x470/0x1a30 net/ipv4/tcp.c:2047
 inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838
 sock_recvmsg_nosec net/socket.c:871 [inline]
 sock_recvmsg net/socket.c:889 [inline]
 sock_recvmsg+0x92/0xb0 net/socket.c:885
 sock_read_iter+0x15f/0x1e0 net/socket.c:967
 call_read_iter include/linux/fs.h:1864 [inline]
 new_sync_read+0x389/0x4f0 fs/read_write.c:414
 __vfs_read+0xb1/0xc0 fs/read_write.c:427
 vfs_read fs/read_write.c:461 [inline]
 vfs_read+0x143/0x2c0 fs/read_write.c:446
 ksys_read+0xd5/0x1b0 fs/read_write.c:587
 __do_sys_read fs/read_write.c:597 [inline]
 __se_sys_read fs/read_write.c:595 [inline]
 __x64_sys_read+0x4c/0x60 fs/read_write.c:595
 do_syscall_64+0xcf/0x2f0 arch/x86/entry/common.c:296
 entry_SYSCALL_64_after_hwframe+0x44/0xa9

Reported by Kernel Concurrency Sanitizer on:
CPU: 0 PID: 7271 Comm: syz-fuzzer Not tainted 5.3.0+ #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
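For context, the annotation pattern the patch relies on pairs a serialised
WRITE_ONCE() on the writer side with READ_ONCE() on the lockless reader side,
which is how the sk_add_backlog() call in x25_receive_data() below now reads
sk->sk_rcvbuf. The fragment that follows is a minimal illustrative sketch, not
part of the patch; the helper names example_set_rcvbuf() and
example_queue_backlog() are made up for the example.

#include <net/sock.h>

/* Illustrative sketch only: these helpers are not part of this file. */
static void example_set_rcvbuf(struct sock *sk, int val)
{
	/* Writer (updates serialised elsewhere, e.g. under the socket lock):
	 * WRITE_ONCE() publishes a single, untorn store that lockless
	 * readers may observe at any time.
	 */
	WRITE_ONCE(sk->sk_rcvbuf, val);
}

static bool example_queue_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* Lockless reader: READ_ONCE() documents the intentional data race
	 * and stops the compiler from tearing or refetching the load, which
	 * silences KCSAN. sk_add_backlog() returns 0 when the skb is queued.
	 */
	return sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)) == 0;
}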
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	History
 *	X.25 001	Jonathan Naylor	Started coding.
 *	2000-09-04	Henner Eisen	Prevent freeing a dangling skb.
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/if_arp.h>
#include <net/x25.h>
#include <net/x25device.h>

static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
	struct sock *sk;
	unsigned short frametype;
	unsigned int lci;

	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
		return 0;

	frametype = skb->data[2];
	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);

	/*
	 *	An LCI of zero is always for us, and it's always a link
	 *	control frame.
	 */
	if (lci == 0) {
		x25_link_control(skb, nb, frametype);
		return 0;
	}

	/*
	 *	Find an existing socket.
	 */
	if ((sk = x25_find_socket(lci, nb)) != NULL) {
		int queued = 1;

		skb_reset_transport_header(skb);
		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk)) {
			queued = x25_process_rx_frame(sk, skb);
		} else {
			queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
		}
		bh_unlock_sock(sk);
		sock_put(sk);
		return queued;
	}

	/*
	 *	Is it a Call Request? If so, process it.
	 */
	if (frametype == X25_CALL_REQUEST)
		return x25_rx_call_request(skb, nb, lci);

	/*
	 *	It's not a Call Request, nor is it a control frame.
	 *	Can we forward it?
	 */
	if (x25_forward_data(lci, nb, skb)) {
		if (frametype == X25_CLEAR_CONFIRMATION)
			x25_clear_forward_by_lci(lci);
		kfree_skb(skb);
		return 1;
	}

/*
	x25_transmit_clear_request(nb, lci, 0x0D);
*/

	if (frametype != X25_CLEAR_CONFIRMATION)
		pr_debug("x25_receive_data(): unknown frame type %2x\n",
			 frametype);

	return 0;
}

int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype, struct net_device *orig_dev)
{
	struct sk_buff *nskb;
	struct x25_neigh *nb;

	if (!net_eq(dev_net(dev), &init_net))
		goto drop;

	nskb = skb_copy(skb, GFP_ATOMIC);
	if (!nskb)
		goto drop;
	kfree_skb(skb);
	skb = nskb;

	/*
	 *	Packet received from an unrecognised device, throw it away.
	 */
	nb = x25_get_neigh(dev);
	if (!nb) {
		pr_debug("unknown neighbour - %s\n", dev->name);
		goto drop;
	}

	if (!pskb_may_pull(skb, 1)) {
		/* Truncated frame: release the neighbour and free the copy. */
		x25_neigh_put(nb);
		goto drop;
	}

	switch (skb->data[0]) {

	case X25_IFACE_DATA:
		skb_pull(skb, 1);
		if (x25_receive_data(skb, nb)) {
			x25_neigh_put(nb);
			goto out;
		}
		break;

	case X25_IFACE_CONNECT:
		x25_link_established(nb);
		break;

	case X25_IFACE_DISCONNECT:
		x25_link_terminated(nb);
		break;
	}
	x25_neigh_put(nb);
drop:
	kfree_skb(skb);
out:
	return 0;
}

void x25_establish_link(struct x25_neigh *nb)
{
	struct sk_buff *skb;
	unsigned char *ptr;

	switch (nb->dev->type) {
	case ARPHRD_X25:
		if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
			pr_err("x25_dev: out of memory\n");
			return;
		}
		ptr  = skb_put(skb, 1);
		*ptr = X25_IFACE_CONNECT;
		break;

#if IS_ENABLED(CONFIG_LLC)
	case ARPHRD_ETHER:
		return;
#endif
	default:
		return;
	}

	skb->protocol = htons(ETH_P_X25);
	skb->dev      = nb->dev;

	dev_queue_xmit(skb);
}

void x25_terminate_link(struct x25_neigh *nb)
{
	struct sk_buff *skb;
	unsigned char *ptr;

#if IS_ENABLED(CONFIG_LLC)
	if (nb->dev->type == ARPHRD_ETHER)
		return;
#endif
	if (nb->dev->type != ARPHRD_X25)
		return;

	skb = alloc_skb(1, GFP_ATOMIC);
	if (!skb) {
		pr_err("x25_dev: out of memory\n");
		return;
	}

	ptr  = skb_put(skb, 1);
	*ptr = X25_IFACE_DISCONNECT;

	skb->protocol = htons(ETH_P_X25);
	skb->dev      = nb->dev;
	dev_queue_xmit(skb);
}

void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
{
	unsigned char *dptr;

	skb_reset_network_header(skb);

	switch (nb->dev->type) {
	case ARPHRD_X25:
		dptr  = skb_push(skb, 1);
		*dptr = X25_IFACE_DATA;
		break;

#if IS_ENABLED(CONFIG_LLC)
	case ARPHRD_ETHER:
		kfree_skb(skb);
		return;
#endif
	default:
		kfree_skb(skb);
		return;
	}

	skb->protocol = htons(ETH_P_X25);
	skb->dev      = nb->dev;

	dev_queue_xmit(skb);
}