Mirror of https://github.com/torvalds/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix races in IPVS, from Tan Hu.

 2) Missing unbind in matchall classifier, from Hangbin Liu.

 3) Missing act_ife action release, from Vlad Buslov.

 4) Cure lockdep splats in ila, from Cong Wang.

 5) veth queue leak on link delete, from Toshiaki Makita.

 6) Disable isdn's IIOCDBGVAR ioctl, it exposes kernel addresses. From
    Kees Cook.

 7) RCU usage fixup in XDP, from Tariq Toukan.

 8) Two TCP ULP fixes from Daniel Borkmann.

 9) r8169 needs REALTEK_PHY as a Kconfig dependency, from Heiner
    Kallweit.

10) Always take tcf_lock with BH disabled, otherwise we can deadlock
    with rate estimator code paths. From Vlad Buslov.

11) Don't use MSI-X on RTL8106e r8169 chips, they don't resume
    properly. From Jian-Hong Pan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
  ip6_vti: fix creating fallback tunnel device for vti6
  ip_vti: fix a null pointer deferrence when create vti fallback tunnel
  r8169: don't use MSI-X on RTL8106e
  net: lan743x_ptp: convert to ktime_get_clocktai_ts64
  net: sched: always disable bh when taking tcf_lock
  ip6_vti: simplify stats handling in vti6_xmit
  bpf: fix redirect to map under tail calls
  r8169: add missing Kconfig dependency
  tools/bpf: fix bpf selftest test_cgroup_storage failure
  bpf, sockmap: fix sock_map_ctx_update_elem race with exist/noexist
  bpf, sockmap: fix map elem deletion race with smap_stop_sock
  bpf, sockmap: fix leakage of smap_psock_map_entry
  tcp, ulp: fix leftover icsk_ulp_ops preventing sock from reattach
  tcp, ulp: add alias for all ulp modules
  bpf: fix a rcu usage warning in bpf_prog_array_copy_core()
  samples/bpf: all XDP samples should unload xdp/bpf prog on SIGTERM
  net/xdp: Fix suspicious RCU usage warning
  net/mlx5e: Delete unneeded function argument
  Documentation: networking: ti-cpsw: correct cbs parameters for Eth1 100Mb
  isdn: Disable IIOCDBGVAR
  ...
This commit is contained in:
commit 2ad0d52699
@@ -4,7 +4,9 @@ Microchip KSZ Series Ethernet switches
 Required properties:
 
 - compatible: For external switch chips, compatible string must be exactly one
-  of: "microchip,ksz9477"
+  of the following:
+  - "microchip,ksz9477"
+  - "microchip,ksz9897"
 
 See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.
@@ -16,6 +16,7 @@ Required properties:
       - "renesas,etheravb-rcar-gen2" as a fallback for the above
		R-Car Gen2 and RZ/G1 devices.
 
+      - "renesas,etheravb-r8a774a1" for the R8A774A1 SoC.
       - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
       - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
       - "renesas,etheravb-r8a77965" for the R8A77965 SoC.
@@ -24,7 +25,7 @@ Required properties:
       - "renesas,etheravb-r8a77990" for the R8A77990 SoC.
       - "renesas,etheravb-r8a77995" for the R8A77995 SoC.
       - "renesas,etheravb-rcar-gen3" as a fallback for the above
-		R-Car Gen3 devices.
+		R-Car Gen3 and RZ/G2 devices.
 
 When compatible with the generic version, nodes must list the
 SoC-specific version corresponding to the platform first followed by
@@ -469,17 +469,18 @@ $ tc -g class show dev eth1
 
 14)
 // Set rate for class A - 31 Mbit (tc0, txq2) using CBS Qdisc for Eth1
-// here only idle slope is important, others ignored
+// here only idle slope is important, others ignored, but calculated
+// for interface speed - 100Mb for eth1 port.
 // Set it +1 Mb for reserve (important!)
-$ tc qdisc add dev eth1 parent 100:3 cbs locredit -1453 \
-hicredit 47 sendslope -969000 idleslope 31000 offload 1
+$ tc qdisc add dev eth1 parent 100:3 cbs locredit -1035 \
+hicredit 465 sendslope -69000 idleslope 31000 offload 1
 net eth1: set FIFO3 bw = 31
 
 15)
 // Set rate for class B - 11 Mbit (tc1, txq3) using CBS Qdisc for Eth1
 // Set it +1 Mb for reserve (important!)
-$ tc qdisc add dev eth1 parent 100:4 cbs locredit -1483 \
-hicredit 34 sendslope -989000 idleslope 11000 offload 1
+$ tc qdisc add dev eth1 parent 100:4 cbs locredit -1335 \
+hicredit 405 sendslope -89000 idleslope 11000 offload 1
 net eth1: set FIFO2 bw = 11
 
 16)
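For reference, the corrected values above follow from the CBS relation
sendslope = idleslope - port_rate (all in kbit/s): on the 100 Mbit eth1 port,
31000 - 100000 = -69000 for class A and 11000 - 100000 = -89000 for class B.
The previous values (-969000 and -989000) had been computed against a
1 Gbit port rate, which is exactly what this documentation fix corrects.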
@@ -5,19 +5,28 @@ This feature adds Linux 2.2-like transparent proxy support to current kernels.
 To use it, enable the socket match and the TPROXY target in your kernel config.
 You will need policy routing too, so be sure to enable that as well.
 
+From Linux 4.18 transparent proxy support is also available in nf_tables.
+
 1. Making non-local sockets work
 ================================
 
 The idea is that you identify packets with destination address matching a local
-socket on your box, set the packet mark to a certain value, and then match on that
-value using policy routing to have those packets delivered locally:
+socket on your box, set the packet mark to a certain value:
 
 # iptables -t mangle -N DIVERT
 # iptables -t mangle -A PREROUTING -p tcp -m socket -j DIVERT
 # iptables -t mangle -A DIVERT -j MARK --set-mark 1
 # iptables -t mangle -A DIVERT -j ACCEPT
 
+Alternatively you can do this in nft with the following commands:
+
+# nft add table filter
+# nft add chain filter divert "{ type filter hook prerouting priority -150; }"
+# nft add rule filter divert meta l4proto tcp socket transparent 1 meta mark set 1 accept
+
+And then match on that value using policy routing to have those packets
+delivered locally:
+
 # ip rule add fwmark 1 lookup 100
 # ip route add local 0.0.0.0/0 dev lo table 100
@@ -57,17 +66,28 @@ add rules like this to the iptables ruleset above:
 # iptables -t mangle -A PREROUTING -p tcp --dport 80 -j TPROXY \
   --tproxy-mark 0x1/0x1 --on-port 50080
 
+Or the following rule to nft:
+
+# nft add rule filter divert tcp dport 80 tproxy to :50080 meta mark set 1 accept
+
 Note that for this to work you'll have to modify the proxy to enable (SOL_IP,
 IP_TRANSPARENT) for the listening socket.
 
+As an example implementation, tcprdr is available here:
+https://git.breakpoint.cc/cgit/fw/tcprdr.git/
+This tool is written by Florian Westphal and it was used for testing during the
+nf_tables implementation.
+
-3. Iptables extensions
-======================
+3. Iptables and nf_tables extensions
+====================================
 
-To use tproxy you'll need to have the 'socket' and 'TPROXY' modules
-compiled for iptables. A patched version of iptables is available
-here: http://git.balabit.hu/?p=bazsi/iptables-tproxy.git
+To use tproxy you'll need to have the following modules compiled for iptables:
+- NETFILTER_XT_MATCH_SOCKET
+- NETFILTER_XT_TARGET_TPROXY
+
+Or the following modules for nf_tables:
+- NFT_SOCKET
+- NFT_TPROXY
 
 4. Application support
 ======================
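As a minimal sketch of the (SOL_IP, IP_TRANSPARENT) requirement mentioned
above (illustration only, not part of the patch; error handling omitted), the
proxy's listening socket would be prepared roughly like this:

	#include <netinet/in.h>
	#include <sys/socket.h>

	/* Mark a listening socket as transparent so it can accept
	 * connections for non-local destination addresses diverted by
	 * the socket/TPROXY rules above. Requires CAP_NET_ADMIN. */
	int make_transparent_socket(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		int one = 1;

		setsockopt(fd, SOL_IP, IP_TRANSPARENT, &one, sizeof(one));
		return fd;
	}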
@@ -1640,13 +1640,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
 			} else
 				return -EINVAL;
 		case IIOCDBGVAR:
-			if (arg) {
-				if (copy_to_user(argp, &dev, sizeof(ulong)))
-					return -EFAULT;
-				return 0;
-			} else
-				return -EINVAL;
-			break;
+			return -EINVAL;
 		default:
 			if ((cmd & IIOCDRVCTL) == IIOCDRVCTL)
 				cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK;
@@ -1102,6 +1102,15 @@ static const struct ksz_chip_data ksz_switch_chips[] = {
 		.cpu_ports = 0x7F,	/* can be configured as cpu port */
 		.port_cnt = 7,		/* total physical port count */
 	},
+	{
+		.chip_id = 0x00989700,
+		.dev_name = "KSZ9897",
+		.num_vlans = 4096,
+		.num_alus = 4096,
+		.num_statics = 16,
+		.cpu_ports = 0x7F,	/* can be configured as cpu port */
+		.port_cnt = 7,		/* total physical port count */
+	},
 };
 
 static int ksz_switch_init(struct ksz_device *dev)
@@ -195,6 +195,7 @@ static int ksz_spi_remove(struct spi_device *spi)
 
 static const struct of_device_id ksz_dt_ids[] = {
 	{ .compatible = "microchip,ksz9477" },
+	{ .compatible = "microchip,ksz9897" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, ksz_dt_ids);
@@ -848,7 +848,7 @@ static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
 
 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
 
-static int mlx5e_grp_per_prio_traffic_get_num_stats(struct mlx5e_priv *priv)
+static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
 {
 	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
 }
@@ -1006,7 +1006,7 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
 
 static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
 {
-	return mlx5e_grp_per_prio_traffic_get_num_stats(priv) +
+	return mlx5e_grp_per_prio_traffic_get_num_stats() +
 	       mlx5e_grp_per_prio_pfc_get_num_stats(priv);
 }
@@ -831,8 +831,7 @@ static void lan743x_ptp_sync_to_system_clock(struct lan743x_adapter *adapter)
 {
 	struct timespec64 ts;
 
-	memset(&ts, 0, sizeof(ts));
-	timekeeping_clocktai64(&ts);
+	ktime_get_clocktai_ts64(&ts);
 
 	lan743x_ptp_clock_set(adapter, ts.tv_sec, ts.tv_nsec, 0);
 }
@@ -100,6 +100,7 @@ config R8169
 	select FW_LOADER
 	select CRC32
 	select PHYLIB
+	select REALTEK_PHY
 	---help---
 	  Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
@@ -7071,17 +7071,20 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
 {
 	unsigned int flags;
 
-	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
 		RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
 		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
 		RTL_W8(tp, Cfg9346, Cfg9346_Lock);
 		flags = PCI_IRQ_LEGACY;
-	} else if (tp->mac_version == RTL_GIGA_MAC_VER_40) {
+		break;
+	case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40:
 		/* This version was reported to have issues with resume
 		 * from suspend when using MSI-X
 		 */
 		flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
-	} else {
+		break;
+	default:
 		flags = PCI_IRQ_ALL_TYPES;
 	}
@@ -789,16 +789,48 @@ static int is_valid_veth_mtu(int mtu)
 	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
 }
 
+static int veth_alloc_queues(struct net_device *dev)
+{
+	struct veth_priv *priv = netdev_priv(dev);
+	int i;
+
+	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
+	if (!priv->rq)
+		return -ENOMEM;
+
+	for (i = 0; i < dev->num_rx_queues; i++)
+		priv->rq[i].dev = dev;
+
+	return 0;
+}
+
+static void veth_free_queues(struct net_device *dev)
+{
+	struct veth_priv *priv = netdev_priv(dev);
+
+	kfree(priv->rq);
+}
+
 static int veth_dev_init(struct net_device *dev)
 {
+	int err;
+
 	dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
 	if (!dev->vstats)
 		return -ENOMEM;
+
+	err = veth_alloc_queues(dev);
+	if (err) {
+		free_percpu(dev->vstats);
+		return err;
+	}
+
 	return 0;
 }
 
 static void veth_dev_free(struct net_device *dev)
 {
+	veth_free_queues(dev);
 	free_percpu(dev->vstats);
 }
@@ -1040,31 +1072,13 @@ static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
 	return 0;
 }
 
-static int veth_alloc_queues(struct net_device *dev)
-{
-	struct veth_priv *priv = netdev_priv(dev);
-
-	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
-	if (!priv->rq)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void veth_free_queues(struct net_device *dev)
-{
-	struct veth_priv *priv = netdev_priv(dev);
-
-	kfree(priv->rq);
-}
-
 static struct rtnl_link_ops veth_link_ops;
 
 static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
 {
-	int err, i;
+	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
@@ -1117,12 +1131,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 		return PTR_ERR(peer);
 	}
 
-	err = veth_alloc_queues(peer);
-	if (err) {
-		put_net(net);
-		goto err_peer_alloc_queues;
-	}
-
 	if (!ifmp || !tbp[IFLA_ADDRESS])
 		eth_hw_addr_random(peer);
 
@@ -1151,10 +1159,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 	 * should be re-allocated
 	 */
 
-	err = veth_alloc_queues(dev);
-	if (err)
-		goto err_alloc_queues;
-
 	if (tb[IFLA_ADDRESS] == NULL)
 		eth_hw_addr_random(dev);
 
@@ -1174,28 +1178,20 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 	 */
 
 	priv = netdev_priv(dev);
-	for (i = 0; i < dev->real_num_rx_queues; i++)
-		priv->rq[i].dev = dev;
 	rcu_assign_pointer(priv->peer, peer);
 
 	priv = netdev_priv(peer);
-	for (i = 0; i < peer->real_num_rx_queues; i++)
-		priv->rq[i].dev = peer;
 	rcu_assign_pointer(priv->peer, dev);
 
 	return 0;
 
 err_register_dev:
-	veth_free_queues(dev);
-err_alloc_queues:
 	/* nothing to do */
 err_configure_peer:
 	unregister_netdevice(peer);
 	return err;
 
 err_register_peer:
-	veth_free_queues(peer);
-err_peer_alloc_queues:
 	free_netdev(peer);
 	return err;
 }
@@ -543,7 +543,6 @@ struct bpf_redirect_info {
 	u32 flags;
 	struct bpf_map *map;
 	struct bpf_map *map_to_flush;
-	unsigned long map_owner;
 	u32 kern_flags;
 };
 
@@ -781,6 +780,8 @@ static inline bool bpf_dump_raw_ok(void)
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);
 
+void bpf_clear_redirect_map(struct bpf_map *map);
+
 static inline bool xdp_return_frame_no_direct(void)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
@@ -447,6 +447,11 @@ static inline clock_t jiffies_delta_to_clock_t(long delta)
 	return jiffies_to_clock_t(max(0L, delta));
 }
 
+static inline unsigned int jiffies_delta_to_msecs(long delta)
+{
+	return jiffies_to_msecs(max(0L, delta));
+}
+
 extern unsigned long clock_t_to_jiffies(unsigned long x);
 extern u64 jiffies_64_to_clock_t(u64 x);
 extern u64 nsec_to_clock_t(u64 x);
@@ -451,9 +451,20 @@ extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
 #define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
 
-int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
-			   size_t max_size, unsigned int cpu_mult,
-			   gfp_t gfp);
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+			     size_t max_size, unsigned int cpu_mult,
+			     gfp_t gfp, const char *name,
+			     struct lock_class_key *key);
+
+#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
+	({								     \
+		static struct lock_class_key key;			     \
+		int ret;						     \
+									     \
+		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
+					       cpu_mult, gfp, #locks, &key); \
+		ret;							     \
+	})
 
 void free_bucket_spinlocks(spinlock_t *locks);
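The point of the wrapper macro is that every call site expands its own static
struct lock_class_key, so lockdep can classify each user's bucket locks
separately instead of lumping them all into one class. A hypothetical call
site (illustration only, not from this patch) keeps using the old name
unchanged:

	/* Each expansion of alloc_bucket_spinlocks() supplies "#locks"
	 * as the lockdep name and a key unique to this call site. */
	spinlock_t *bucket_locks;
	unsigned int lock_mask;

	if (alloc_bucket_spinlocks(&bucket_locks, &lock_mask, 1024, 0,
				   GFP_KERNEL))
		return -ENOMEM;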
@@ -274,7 +274,7 @@ enum nft_set_class {
  * @space: memory class
  */
 struct nft_set_estimate {
-	unsigned int		size;
+	u64			size;
	enum nft_set_class	lookup;
	enum nft_set_class	space;
 };
@@ -336,7 +336,7 @@ struct nft_set_ops {
					    const struct nft_set_elem *elem,
					    unsigned int flags);
 
-	unsigned int		(*privsize)(const struct nlattr * const nla[],
+	u64			(*privsize)(const struct nlattr * const nla[],
					    const struct nft_set_desc *desc);
	bool			(*estimate)(const struct nft_set_desc *desc,
					    u32 features,
@@ -1374,6 +1374,6 @@ struct nft_trans_flowtable {
	(((struct nft_trans_flowtable *)trans->data)->flowtable)
 
 int __init nft_chain_filter_init(void);
-void __exit nft_chain_filter_fini(void);
+void nft_chain_filter_fini(void);
 
 #endif /* _NET_NF_TABLES_H */
@@ -2065,6 +2065,10 @@ int tcp_set_ulp_id(struct sock *sk, const int ulp);
 void tcp_get_available_ulp(char *buf, size_t len);
 void tcp_cleanup_ulp(struct sock *sk);
 
+#define MODULE_ALIAS_TCP_ULP(name)				\
+	__MODULE_INFO(alias, alias_userspace, name);		\
+	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
+
 /* Call BPF_SOCK_OPS program that returns an int. If the return value
  * is < 0, then the BPF op failed (for example if the loaded BPF
  * program does not support the chosen operation or there is no BPF
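Together with the request_module("tcp-ulp-%s", name) change in tcp_ulp.c
further down, this macro lets a ULP module be demand-loaded by its protocol
name. A sketch of how a ULP module declares it ("tls" here is illustrative of
how net/tls uses the macro):

	/* In the ULP module: register the alias so that setting the
	 * TCP_ULP socket option can trigger module auto-loading. */
	MODULE_ALIAS_TCP_ULP("tls");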
@@ -147,9 +147,8 @@ struct _bpf_dtab_netdev {
 
 #define devmap_ifindex(fwd, map)				\
	(!fwd ? 0 :						\
-	 (!map ? 0 :						\
-	  ((map->map_type == BPF_MAP_TYPE_DEVMAP) ?		\
-	   ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)))
+	 ((map->map_type == BPF_MAP_TYPE_DEVMAP) ?		\
+	  ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0))
 
 #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx)	\
	 trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map),	\
@@ -2,6 +2,8 @@
 #define _NF_OSF_H
 
 #include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
 
 #define MAXGENRELEN	32
 
@@ -21,8 +21,6 @@
 #define _XT_OSF_H
 
 #include <linux/types.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
 #include <linux/netfilter/nfnetlink_osf.h>
 
 #define XT_OSF_GENRE		NF_OSF_GENRE
@@ -1579,7 +1579,7 @@ static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
 	struct bpf_prog_array_item *item;
 	int i = 0;
 
-	item = rcu_dereference(array)->items;
+	item = rcu_dereference_check(array, 1)->items;
 	for (; item->prog; item++) {
 		if (item->prog == &dummy_bpf_prog.prog)
 			continue;
@@ -479,6 +479,8 @@ static void cpu_map_free(struct bpf_map *map)
 	 * It does __not__ ensure pending flush operations (if any) are
 	 * complete.
 	 */
+
+	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* To ensure all pending flush operations have completed wait for flush
@@ -161,6 +161,7 @@ static void dev_map_free(struct bpf_map *map)
 	list_del_rcu(&dtab->list);
 	spin_unlock(&dev_map_lock);
 
+	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* To ensure all pending flush operations have completed wait for flush
@@ -58,6 +58,7 @@ struct bpf_stab {
 	struct bpf_map map;
 	struct sock **sock_map;
 	struct bpf_sock_progs progs;
+	raw_spinlock_t lock;
 };
 
 struct bucket {
@@ -89,9 +90,9 @@ enum smap_psock_state {
 
 struct smap_psock_map_entry {
 	struct list_head list;
+	struct bpf_map *map;
 	struct sock **entry;
 	struct htab_elem __rcu *hash_link;
-	struct bpf_htab __rcu *htab;
 };
 
 struct smap_psock {
@@ -343,13 +344,18 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
 	e = psock_map_pop(sk, psock);
 	while (e) {
 		if (e->entry) {
-			osk = cmpxchg(e->entry, sk, NULL);
+			struct bpf_stab *stab = container_of(e->map, struct bpf_stab, map);
+
+			raw_spin_lock_bh(&stab->lock);
+			osk = *e->entry;
 			if (osk == sk) {
+				*e->entry = NULL;
 				smap_release_sock(psock, sk);
 			}
+			raw_spin_unlock_bh(&stab->lock);
 		} else {
 			struct htab_elem *link = rcu_dereference(e->hash_link);
-			struct bpf_htab *htab = rcu_dereference(e->htab);
+			struct bpf_htab *htab = container_of(e->map, struct bpf_htab, map);
 			struct hlist_head *head;
 			struct htab_elem *l;
 			struct bucket *b;
@@ -370,6 +376,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
 			}
 			raw_spin_unlock_bh(&b->lock);
 		}
+		kfree(e);
 		e = psock_map_pop(sk, psock);
 	}
 	rcu_read_unlock();
@@ -1641,6 +1648,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 
 	bpf_map_init_from_attr(&stab->map, attr);
+	raw_spin_lock_init(&stab->lock);
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
@@ -1675,8 +1683,10 @@ static void smap_list_map_remove(struct smap_psock *psock,
 
 	spin_lock_bh(&psock->maps_lock);
 	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
-		if (e->entry == entry)
+		if (e->entry == entry) {
 			list_del(&e->list);
+			kfree(e);
+		}
 	}
 	spin_unlock_bh(&psock->maps_lock);
 }
@@ -1690,8 +1700,10 @@ static void smap_list_hash_remove(struct smap_psock *psock,
 	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
 		struct htab_elem *c = rcu_dereference(e->hash_link);
 
-		if (c == hash_link)
+		if (c == hash_link) {
 			list_del(&e->list);
+			kfree(e);
+		}
 	}
 	spin_unlock_bh(&psock->maps_lock);
 }
@@ -1711,14 +1723,15 @@ static void sock_map_free(struct bpf_map *map)
 	 * and a grace period expire to ensure psock is really safe to remove.
 	 */
 	rcu_read_lock();
+	raw_spin_lock_bh(&stab->lock);
 	for (i = 0; i < stab->map.max_entries; i++) {
 		struct smap_psock *psock;
 		struct sock *sock;
 
-		sock = xchg(&stab->sock_map[i], NULL);
+		sock = stab->sock_map[i];
 		if (!sock)
 			continue;
 
+		stab->sock_map[i] = NULL;
 		psock = smap_psock_sk(sock);
 		/* This check handles a racing sock event that can get the
 		 * sk_callback_lock before this case but after xchg happens
@@ -1730,6 +1743,7 @@ static void sock_map_free(struct bpf_map *map)
 			smap_release_sock(psock, sock);
 		}
 	}
+	raw_spin_unlock_bh(&stab->lock);
 	rcu_read_unlock();
 
 	sock_map_remove_complete(stab);
@@ -1773,19 +1787,23 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key)
 	if (k >= map->max_entries)
 		return -EINVAL;
 
-	sock = xchg(&stab->sock_map[k], NULL);
+	raw_spin_lock_bh(&stab->lock);
+	sock = stab->sock_map[k];
+	stab->sock_map[k] = NULL;
+	raw_spin_unlock_bh(&stab->lock);
 	if (!sock)
 		return -EINVAL;
 
 	psock = smap_psock_sk(sock);
 	if (!psock)
-		goto out;
-
-	if (psock->bpf_parse)
+		return 0;
+	if (psock->bpf_parse) {
+		write_lock_bh(&sock->sk_callback_lock);
 		smap_stop_sock(psock, sock);
+		write_unlock_bh(&sock->sk_callback_lock);
+	}
 	smap_list_map_remove(psock, &stab->sock_map[k]);
 	smap_release_sock(psock, sock);
-out:
 	return 0;
 }
 
@@ -1821,11 +1839,9 @@ out:
 static int __sock_map_ctx_update_elem(struct bpf_map *map,
				      struct bpf_sock_progs *progs,
				      struct sock *sock,
-				      struct sock **map_link,
				      void *key)
 {
 	struct bpf_prog *verdict, *parse, *tx_msg;
-	struct smap_psock_map_entry *e = NULL;
 	struct smap_psock *psock;
 	bool new = false;
 	int err = 0;
@@ -1898,14 +1914,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
 		new = true;
 	}
 
-	if (map_link) {
-		e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
-		if (!e) {
-			err = -ENOMEM;
-			goto out_free;
-		}
-	}
-
 	/* 3. At this point we have a reference to a valid psock that is
 	 * running. Attach any BPF programs needed.
 	 */
@@ -1927,17 +1935,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
 		write_unlock_bh(&sock->sk_callback_lock);
 	}
 
-	/* 4. Place psock in sockmap for use and stop any programs on
-	 * the old sock assuming its not the same sock we are replacing
-	 * it with. Because we can only have a single set of programs if
-	 * old_sock has a strp we can stop it.
-	 */
-	if (map_link) {
-		e->entry = map_link;
-		spin_lock_bh(&psock->maps_lock);
-		list_add_tail(&e->list, &psock->maps);
-		spin_unlock_bh(&psock->maps_lock);
-	}
 	return err;
 out_free:
 	smap_release_sock(psock, sock);
@@ -1948,7 +1945,6 @@ out_progs:
 	}
 	if (tx_msg)
 		bpf_prog_put(tx_msg);
-	kfree(e);
 	return err;
 }
 
@@ -1958,36 +1954,57 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
 {
 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 	struct bpf_sock_progs *progs = &stab->progs;
-	struct sock *osock, *sock;
+	struct sock *osock, *sock = skops->sk;
+	struct smap_psock_map_entry *e;
+	struct smap_psock *psock;
 	u32 i = *(u32 *)key;
 	int err;
 
 	if (unlikely(flags > BPF_EXIST))
 		return -EINVAL;
 
 	if (unlikely(i >= stab->map.max_entries))
 		return -E2BIG;
 
-	sock = READ_ONCE(stab->sock_map[i]);
-	if (flags == BPF_EXIST && !sock)
-		return -ENOENT;
-	else if (flags == BPF_NOEXIST && sock)
-		return -EEXIST;
+	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
+	if (!e)
+		return -ENOMEM;
 
-	sock = skops->sk;
-	err = __sock_map_ctx_update_elem(map, progs, sock, &stab->sock_map[i],
-					 key);
+	err = __sock_map_ctx_update_elem(map, progs, sock, key);
 	if (err)
 		goto out;
 
-	osock = xchg(&stab->sock_map[i], sock);
-	if (osock) {
-		struct smap_psock *opsock = smap_psock_sk(osock);
-
-		smap_list_map_remove(opsock, &stab->sock_map[i]);
-		smap_release_sock(opsock, osock);
+	/* psock guaranteed to be present. */
+	psock = smap_psock_sk(sock);
+	raw_spin_lock_bh(&stab->lock);
+	osock = stab->sock_map[i];
+	if (osock && flags == BPF_NOEXIST) {
+		err = -EEXIST;
+		goto out_unlock;
+	}
+	if (!osock && flags == BPF_EXIST) {
+		err = -ENOENT;
+		goto out_unlock;
 	}
+
+	e->entry = &stab->sock_map[i];
+	e->map = map;
+	spin_lock_bh(&psock->maps_lock);
+	list_add_tail(&e->list, &psock->maps);
+	spin_unlock_bh(&psock->maps_lock);
+
+	stab->sock_map[i] = sock;
+	if (osock) {
+		psock = smap_psock_sk(osock);
+		smap_list_map_remove(psock, &stab->sock_map[i]);
+		smap_release_sock(psock, osock);
+	}
+	raw_spin_unlock_bh(&stab->lock);
+	return 0;
+out_unlock:
+	smap_release_sock(psock, sock);
+	raw_spin_unlock_bh(&stab->lock);
 out:
+	kfree(e);
 	return err;
 }
 
@@ -2350,7 +2367,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	err = __sock_map_ctx_update_elem(map, progs, sock, NULL, key);
+	err = __sock_map_ctx_update_elem(map, progs, sock, key);
 	if (err)
 		goto err;
 
@@ -2376,8 +2393,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
 	}
 
 	rcu_assign_pointer(e->hash_link, l_new);
-	rcu_assign_pointer(e->htab,
-			   container_of(map, struct bpf_htab, map));
+	e->map = map;
	spin_lock_bh(&psock->maps_lock);
	list_add_tail(&e->list, &psock->maps);
	spin_unlock_bh(&psock->maps_lock);
@@ -5844,27 +5844,6 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
			goto patch_call_imm;
		}
 
-		if (insn->imm == BPF_FUNC_redirect_map) {
-			/* Note, we cannot use prog directly as imm as subsequent
-			 * rewrites would still change the prog pointer. The only
-			 * stable address we can use is aux, which also works with
-			 * prog clones during blinding.
-			 */
-			u64 addr = (unsigned long)prog->aux;
-			struct bpf_insn r4_ld[] = {
-				BPF_LD_IMM64(BPF_REG_4, addr),
-				*insn,
-			};
-			cnt = ARRAY_SIZE(r4_ld);
-
-			new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
-			if (!new_prog)
-				return -ENOMEM;
-
-			delta += cnt - 1;
-			env->prog = prog = new_prog;
-			insn = new_prog->insnsi + i + delta;
-		}
-
 patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* all functions that have prototype and verifier allowed
@@ -75,6 +75,7 @@ static void xsk_map_free(struct bpf_map *map)
 	struct xsk_map *m = container_of(map, struct xsk_map, map);
 	int i;
 
+	bpf_clear_redirect_map(map);
 	synchronize_net();
 
 	for (i = 0; i < map->max_entries; i++) {
@@ -11,8 +11,9 @@
  * to a power of 2 to be suitable as a hash table.
  */
 
-int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
-			   size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+			     size_t max_size, unsigned int cpu_mult, gfp_t gfp,
+			     const char *name, struct lock_class_key *key)
 {
 	spinlock_t *tlocks = NULL;
 	unsigned int i, size;
@@ -33,8 +34,10 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
 		tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
 		if (!tlocks)
 			return -ENOMEM;
-		for (i = 0; i < size; i++)
+		for (i = 0; i < size; i++) {
 			spin_lock_init(&tlocks[i]);
+			lockdep_init_map(&tlocks[i].dep_map, name, key, 0);
+		}
 	}
 
 	*locks = tlocks;
@@ -42,7 +45,7 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
 
 	return 0;
 }
-EXPORT_SYMBOL(alloc_bucket_spinlocks);
+EXPORT_SYMBOL(__alloc_bucket_spinlocks);
 
 void free_bucket_spinlocks(spinlock_t *locks)
 {
@@ -3246,31 +3246,33 @@ static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
 	}
 }
 
-static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
-				   unsigned long aux)
+void bpf_clear_redirect_map(struct bpf_map *map)
 {
-	return (unsigned long)xdp_prog->aux != aux;
+	struct bpf_redirect_info *ri;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		ri = per_cpu_ptr(&bpf_redirect_info, cpu);
+		/* Avoid polluting remote cacheline due to writes if
+		 * not needed. Once we pass this test, we need the
+		 * cmpxchg() to make sure it hasn't been changed in
+		 * the meantime by remote CPU.
+		 */
+		if (unlikely(READ_ONCE(ri->map) == map))
+			cmpxchg(&ri->map, map, NULL);
+	}
 }
 
 static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
-			       struct bpf_prog *xdp_prog)
+			       struct bpf_prog *xdp_prog, struct bpf_map *map)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	unsigned long map_owner = ri->map_owner;
-	struct bpf_map *map = ri->map;
 	u32 index = ri->ifindex;
 	void *fwd = NULL;
 	int err;
 
 	ri->ifindex = 0;
-	ri->map = NULL;
-	ri->map_owner = 0;
-
-	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
-		err = -EFAULT;
-		map = NULL;
-		goto err;
-	}
+	WRITE_ONCE(ri->map, NULL);
 
 	fwd = __xdp_map_lookup_elem(map, index);
 	if (!fwd) {
@@ -3296,12 +3298,13 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
		    struct bpf_prog *xdp_prog)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_map *map = READ_ONCE(ri->map);
 	struct net_device *fwd;
 	u32 index = ri->ifindex;
 	int err;
 
-	if (ri->map)
-		return xdp_do_redirect_map(dev, xdp, xdp_prog);
+	if (map)
+		return xdp_do_redirect_map(dev, xdp, xdp_prog, map);
 
 	fwd = dev_get_by_index_rcu(dev_net(dev), index);
 	ri->ifindex = 0;
@@ -3325,24 +3328,17 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect);
 static int xdp_do_generic_redirect_map(struct net_device *dev,
					struct sk_buff *skb,
					struct xdp_buff *xdp,
-					struct bpf_prog *xdp_prog)
+					struct bpf_prog *xdp_prog,
+					struct bpf_map *map)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	unsigned long map_owner = ri->map_owner;
-	struct bpf_map *map = ri->map;
 	u32 index = ri->ifindex;
 	void *fwd = NULL;
 	int err = 0;
 
 	ri->ifindex = 0;
-	ri->map = NULL;
-	ri->map_owner = 0;
+	WRITE_ONCE(ri->map, NULL);
 
-	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
-		err = -EFAULT;
-		map = NULL;
-		goto err;
-	}
 	fwd = __xdp_map_lookup_elem(map, index);
 	if (unlikely(!fwd)) {
 		err = -EINVAL;
@@ -3379,13 +3375,14 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_map *map = READ_ONCE(ri->map);
 	u32 index = ri->ifindex;
 	struct net_device *fwd;
 	int err = 0;
 
-	if (ri->map)
-		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog);
-
+	if (map)
+		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
+						   map);
 	ri->ifindex = 0;
 	fwd = dev_get_by_index_rcu(dev_net(dev), index);
 	if (unlikely(!fwd)) {
@@ -3416,8 +3413,7 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
 
 	ri->ifindex = ifindex;
 	ri->flags = flags;
-	ri->map = NULL;
-	ri->map_owner = 0;
+	WRITE_ONCE(ri->map, NULL);
 
 	return XDP_REDIRECT;
 }
@@ -3430,8 +3426,8 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
	.arg2_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
-	   unsigned long, map_owner)
+BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
+	   u64, flags)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 
@@ -3440,15 +3436,11 @@ BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
 
 	ri->ifindex = ifindex;
 	ri->flags = flags;
-	ri->map = map;
-	ri->map_owner = map_owner;
+	WRITE_ONCE(ri->map, map);
 
 	return XDP_REDIRECT;
 }
 
-/* Note, arg4 is hidden from users and populated by the verifier
- * with the right pointer.
- */
 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
	.func           = bpf_xdp_redirect_map,
	.gpl_only       = false,
@@ -98,23 +98,15 @@ static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
 {
 	struct xdp_mem_allocator *xa;
 	int id = xdp_rxq->mem.id;
-	int err;
 
 	if (id == 0)
 		return;
 
 	mutex_lock(&mem_id_lock);
 
-	xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
-	if (!xa) {
-		mutex_unlock(&mem_id_lock);
-		return;
-	}
-
-	err = rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params);
-	WARN_ON(err);
-
-	call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
+	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
+	if (xa && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
+		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
 
 	mutex_unlock(&mem_id_lock);
 }
@@ -438,7 +438,8 @@ static int __net_init vti_init_net(struct net *net)
 	if (err)
 		return err;
 	itn = net_generic(net, vti_net_id);
-	vti_fb_tunnel_init(itn->fb_tunnel_dev);
+	if (itn->fb_tunnel_dev)
+		vti_fb_tunnel_init(itn->fb_tunnel_dev);
 	return 0;
 }
@@ -51,7 +51,7 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
 #ifdef CONFIG_MODULES
 	if (!ulp && capable(CAP_NET_ADMIN)) {
 		rcu_read_unlock();
-		request_module("%s", name);
+		request_module("tcp-ulp-%s", name);
 		rcu_read_lock();
 		ulp = tcp_ulp_find(name);
 	}
@@ -129,6 +129,8 @@ void tcp_cleanup_ulp(struct sock *sk)
 	if (icsk->icsk_ulp_ops->release)
 		icsk->icsk_ulp_ops->release(sk);
 	module_put(icsk->icsk_ulp_ops->owner);
+
+	icsk->icsk_ulp_ops = NULL;
 }
 
 /* Change upper layer protocol for socket */
@@ -503,17 +503,9 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 	skb->dev = skb_dst(skb)->dev;
 
 	err = dst_output(t->net, skb->sk, skb);
-	if (net_xmit_eval(err) == 0) {
-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
-
-		u64_stats_update_begin(&tstats->syncp);
-		tstats->tx_bytes += pkt_len;
-		tstats->tx_packets++;
-		u64_stats_update_end(&tstats->syncp);
-	} else {
-		stats->tx_errors++;
-		stats->tx_aborted_errors++;
-	}
+	if (net_xmit_eval(err) == 0)
+		err = pkt_len;
+	iptunnel_xmit_stats(dev, err);
 
 	return 0;
 tx_err_link_failure:
@@ -1114,6 +1106,8 @@ static int __net_init vti6_init_net(struct net *net)
 	ip6n->tnls[0] = ip6n->tnls_wc;
 	ip6n->tnls[1] = ip6n->tnls_r_l;
 
+	if (!net_has_fallback_tunnels(net))
+		return 0;
 	err = -ENOMEM;
 	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
					NET_NAME_UNKNOWN, vti6_dev_setup);
@@ -26,6 +26,12 @@ static bool rpfilter_addr_unicast(const struct in6_addr *addr)
 	return addr_type & IPV6_ADDR_UNICAST;
 }
 
+static bool rpfilter_addr_linklocal(const struct in6_addr *addr)
+{
+	int addr_type = ipv6_addr_type(addr);
+	return addr_type & IPV6_ADDR_LINKLOCAL;
+}
+
 static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
				     const struct net_device *dev, u8 flags)
 {
@@ -48,7 +54,11 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
 	}
 
 	fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
-	if ((flags & XT_RPFILTER_LOOSE) == 0)
+
+	if (rpfilter_addr_linklocal(&iph->saddr)) {
+		lookup_flags |= RT6_LOOKUP_F_IFACE;
+		fl6.flowi6_oif = dev->ifindex;
+	} else if ((flags & XT_RPFILTER_LOOSE) == 0)
 		fl6.flowi6_oif = dev->ifindex;
 
 	rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
@@ -1117,24 +1117,28 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
 #ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
-				"%s %04X %-11s %7lu%s\n",
+				"%s %04X %-11s %7u%s\n",
				ip_vs_proto_name(cp->protocol),
				&cp->caddr.in6, ntohs(cp->cport),
				&cp->vaddr.in6, ntohs(cp->vport),
				dbuf, ntohs(cp->dport),
				ip_vs_state_name(cp),
-				(cp->timer.expires-jiffies)/HZ, pe_data);
+				jiffies_delta_to_msecs(cp->timer.expires -
+						       jiffies) / 1000,
+				pe_data);
		else
 #endif
			seq_printf(seq,
				"%-3s %08X %04X %08X %04X"
-				" %s %04X %-11s %7lu%s\n",
+				" %s %04X %-11s %7u%s\n",
				ip_vs_proto_name(cp->protocol),
				ntohl(cp->caddr.ip), ntohs(cp->cport),
				ntohl(cp->vaddr.ip), ntohs(cp->vport),
				dbuf, ntohs(cp->dport),
				ip_vs_state_name(cp),
-				(cp->timer.expires-jiffies)/HZ, pe_data);
+				jiffies_delta_to_msecs(cp->timer.expires -
+						       jiffies) / 1000,
+				pe_data);
	}
	return 0;
 }
@@ -1179,26 +1183,28 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
 #ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
-				"%s %04X %-11s %-6s %7lu\n",
+				"%s %04X %-11s %-6s %7u\n",
				ip_vs_proto_name(cp->protocol),
				&cp->caddr.in6, ntohs(cp->cport),
				&cp->vaddr.in6, ntohs(cp->vport),
				dbuf, ntohs(cp->dport),
				ip_vs_state_name(cp),
				ip_vs_origin_name(cp->flags),
-				(cp->timer.expires-jiffies)/HZ);
+				jiffies_delta_to_msecs(cp->timer.expires -
+						       jiffies) / 1000);
		else
 #endif
			seq_printf(seq,
				"%-3s %08X %04X %08X %04X "
-				"%s %04X %-11s %-6s %7lu\n",
+				"%s %04X %-11s %-6s %7u\n",
				ip_vs_proto_name(cp->protocol),
				ntohl(cp->caddr.ip), ntohs(cp->cport),
				ntohl(cp->vaddr.ip), ntohs(cp->vport),
				dbuf, ntohs(cp->dport),
				ip_vs_state_name(cp),
				ip_vs_origin_name(cp->flags),
-				(cp->timer.expires-jiffies)/HZ);
+				jiffies_delta_to_msecs(cp->timer.expires -
						       jiffies) / 1000);
	}
	return 0;
 }
@@ -1972,13 +1972,20 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
 	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		/* the destination server is not available */
 
-		if (sysctl_expire_nodest_conn(ipvs)) {
+		__u32 flags = cp->flags;
+
+		/* when timer already started, silently drop the packet.*/
+		if (timer_pending(&cp->timer))
+			__ip_vs_conn_put(cp);
+		else
+			ip_vs_conn_put(cp);
+
+		if (sysctl_expire_nodest_conn(ipvs) &&
+		    !(flags & IP_VS_CONN_F_ONE_PACKET)) {
 			/* try to expire the connection immediately */
 			ip_vs_conn_expire_now(cp);
 		}
-		/* don't restart its timer, and silently
-		   drop the packet. */
-		__ip_vs_conn_put(cp);
+
 		return NF_DROP;
 	}
@@ -846,6 +846,21 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[])
 #endif
 }
 
+static int ctnetlink_start(struct netlink_callback *cb)
+{
+	const struct nlattr * const *cda = cb->data;
+	struct ctnetlink_filter *filter = NULL;
+
+	if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
+		filter = ctnetlink_alloc_filter(cda);
+		if (IS_ERR(filter))
+			return PTR_ERR(filter);
+	}
+
+	cb->data = filter;
+	return 0;
+}
+
 static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
 {
 	struct ctnetlink_filter *filter = data;
@@ -1290,19 +1305,12 @@ static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		struct netlink_dump_control c = {
+			.start = ctnetlink_start,
 			.dump = ctnetlink_dump_table,
 			.done = ctnetlink_done,
+			.data = (void *)cda,
 		};
 
-		if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
-			struct ctnetlink_filter *filter;
-
-			filter = ctnetlink_alloc_filter(cda);
-			if (IS_ERR(filter))
-				return PTR_ERR(filter);
-
-			c.data = filter;
-		}
 		return netlink_dump_start(ctnl, skb, nlh, &c);
 	}
@@ -312,7 +312,9 @@ void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto)
 	__nf_ct_l4proto_unregister_one(l4proto);
 	mutex_unlock(&nf_ct_proto_mutex);
 
-	synchronize_rcu();
+	synchronize_net();
+	/* Remove all conntrack entries for this protocol */
+	nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister_one);
 
@@ -333,14 +335,17 @@ static void
 nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[],
			 unsigned int num_proto)
 {
+	int i;
+
 	mutex_lock(&nf_ct_proto_mutex);
-	while (num_proto-- != 0)
-		__nf_ct_l4proto_unregister_one(l4proto[num_proto]);
+	for (i = 0; i < num_proto; i++)
+		__nf_ct_l4proto_unregister_one(l4proto[i]);
 	mutex_unlock(&nf_ct_proto_mutex);
 
 	synchronize_net();
-	/* Remove all conntrack entries for this protocol */
-	nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto);
+
+	for (i = 0; i < num_proto; i++)
+		nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto[i]);
 }
 
 static int
@@ -3354,7 +3354,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 	struct nft_set *set;
 	struct nft_ctx ctx;
 	char *name;
-	unsigned int size;
+	u64 size;
 	u64 timeout;
 	u32 ktype, dtype, flags, policy, gc_int, objtype;
 	struct nft_set_desc desc;
@@ -5925,10 +5925,7 @@ static int nf_tables_flowtable_event(struct notifier_block *this,
 	if (event != NETDEV_UNREGISTER)
 		return 0;
 
-	net = maybe_get_net(dev_net(dev));
-	if (!net)
-		return 0;
-
+	net = dev_net(dev);
 	mutex_lock(&net->nft.commit_mutex);
 	list_for_each_entry(table, &net->nft.tables, list) {
 		list_for_each_entry(flowtable, &table->flowtables, list) {
@@ -5936,7 +5933,7 @@ static int nf_tables_flowtable_event(struct notifier_block *this,
 		}
 	}
 	mutex_unlock(&net->nft.commit_mutex);
-	put_net(net);
+
 	return NOTIFY_DONE;
 }
 
@@ -7273,21 +7270,36 @@ static int __init nf_tables_module_init(void)
 {
 	int err;
 
-	nft_chain_filter_init();
-
-	err = nf_tables_core_module_init();
+	err = register_pernet_subsys(&nf_tables_net_ops);
 	if (err < 0)
 		return err;
 
+	err = nft_chain_filter_init();
+	if (err < 0)
+		goto err1;
+
+	err = nf_tables_core_module_init();
+	if (err < 0)
+		goto err2;
+
+	err = register_netdevice_notifier(&nf_tables_flowtable_notifier);
+	if (err < 0)
+		goto err3;
+
 	/* must be last */
 	err = nfnetlink_subsys_register(&nf_tables_subsys);
 	if (err < 0)
-		goto err;
+		goto err4;
 
-	register_netdevice_notifier(&nf_tables_flowtable_notifier);
-
-	return register_pernet_subsys(&nf_tables_net_ops);
-err:
+	return 0;
+err4:
+	unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
+err3:
+	nf_tables_core_module_exit();
+err2:
+	nft_chain_filter_fini();
+err1:
+	unregister_pernet_subsys(&nf_tables_net_ops);
 	return err;
 }
@@ -238,29 +238,33 @@ static const struct nla_policy filter_policy[NFACCT_FILTER_MAX + 1] = {
	[NFACCT_FILTER_VALUE]	= { .type = NLA_U32 },
 };
 
-static struct nfacct_filter *
-nfacct_filter_alloc(const struct nlattr * const attr)
+static int nfnl_acct_start(struct netlink_callback *cb)
 {
-	struct nfacct_filter *filter;
+	const struct nlattr *const attr = cb->data;
	struct nlattr *tb[NFACCT_FILTER_MAX + 1];
+	struct nfacct_filter *filter;
	int err;
 
+	if (!attr)
+		return 0;
+
	err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy,
			       NULL);
	if (err < 0)
-		return ERR_PTR(err);
+		return err;
 
	if (!tb[NFACCT_FILTER_MASK] || !tb[NFACCT_FILTER_VALUE])
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
	filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL);
	if (!filter)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
	filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK]));
	filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE]));
+	cb->data = filter;
 
-	return filter;
+	return 0;
 }
 
 static int nfnl_acct_get(struct net *net, struct sock *nfnl,
@@ -275,18 +279,11 @@ static int nfnl_acct_get(struct net *net, struct sock *nfnl,
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nfnl_acct_dump,
+			.start = nfnl_acct_start,
			.done = nfnl_acct_done,
+			.data = (void *)tb[NFACCT_FILTER],
		};
 
-		if (tb[NFACCT_FILTER]) {
-			struct nfacct_filter *filter;
-
-			filter = nfacct_filter_alloc(tb[NFACCT_FILTER]);
-			if (IS_ERR(filter))
-				return PTR_ERR(filter);
-
-			c.data = filter;
-		}
		return netlink_dump_start(nfnl, skb, nlh, &c);
	}
@@ -293,6 +293,13 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
 	if (strcmp(basechain->dev_name, dev->name) != 0)
 		return;
 
+	/* UNREGISTER events are also happening on netns exit.
+	 *
+	 * Although nf_tables core releases all tables/chains, only
+	 * this event handler provides guarantee that
+	 * basechain.ops->dev is still accessible, so we cannot
+	 * skip exiting net namespaces.
+	 */
 	__nft_release_basechain(ctx);
 	break;
 case NETDEV_CHANGENAME:
@@ -318,10 +325,6 @@ static int nf_tables_netdev_event(struct notifier_block *this,
	    event != NETDEV_CHANGENAME)
		return NOTIFY_DONE;
 
-	ctx.net = maybe_get_net(ctx.net);
-	if (!ctx.net)
-		return NOTIFY_DONE;
-
	mutex_lock(&ctx.net->nft.commit_mutex);
	list_for_each_entry(table, &ctx.net->nft.tables, list) {
		if (table->family != NFPROTO_NETDEV)
@@ -338,7 +341,6 @@ static int nf_tables_netdev_event(struct notifier_block *this,
		}
	}
	mutex_unlock(&ctx.net->nft.commit_mutex);
-	put_net(ctx.net);
 
	return NOTIFY_DONE;
 }
@@ -392,7 +394,7 @@ int __init nft_chain_filter_init(void)
	return 0;
 }
 
-void __exit nft_chain_filter_fini(void)
+void nft_chain_filter_fini(void)
 {
	nft_chain_filter_bridge_fini();
	nft_chain_filter_inet_fini();
@@ -832,12 +832,13 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
 	__u8 l4num;
 	int ret;
 
-	if (!tb[NFTA_CT_TIMEOUT_L3PROTO] ||
-	    !tb[NFTA_CT_TIMEOUT_L4PROTO] ||
+	if (!tb[NFTA_CT_TIMEOUT_L4PROTO] ||
	    !tb[NFTA_CT_TIMEOUT_DATA])
		return -EINVAL;
 
-	l3num = ntohs(nla_get_be16(tb[NFTA_CT_TIMEOUT_L3PROTO]));
+	if (tb[NFTA_CT_TIMEOUT_L3PROTO])
+		l3num = ntohs(nla_get_be16(tb[NFTA_CT_TIMEOUT_L3PROTO]));
+
	l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]);
	priv->l4proto = l4num;
@@ -187,8 +187,6 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 	if (tb[NFTA_DYNSET_EXPR] != NULL) {
 		if (!(set->flags & NFT_SET_EVAL))
 			return -EINVAL;
-		if (!nft_set_is_anonymous(set))
-			return -EOPNOTSUPP;
 
 		priv->expr = nft_expr_init(ctx, tb[NFTA_DYNSET_EXPR]);
 		if (IS_ERR(priv->expr))
@@ -248,13 +248,13 @@ static inline u32 nft_bitmap_size(u32 klen)
 	return ((2 << ((klen * BITS_PER_BYTE) - 1)) / BITS_PER_BYTE) << 1;
 }
 
-static inline u32 nft_bitmap_total_size(u32 klen)
+static inline u64 nft_bitmap_total_size(u32 klen)
 {
 	return sizeof(struct nft_bitmap) + nft_bitmap_size(klen);
 }
 
-static unsigned int nft_bitmap_privsize(const struct nlattr * const nla[],
-					const struct nft_set_desc *desc)
+static u64 nft_bitmap_privsize(const struct nlattr * const nla[],
+			       const struct nft_set_desc *desc)
 {
 	u32 klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
@@ -341,8 +341,8 @@ schedule:
			   nft_set_gc_interval(set));
 }
 
-static unsigned int nft_rhash_privsize(const struct nlattr * const nla[],
-				       const struct nft_set_desc *desc)
+static u64 nft_rhash_privsize(const struct nlattr * const nla[],
+			      const struct nft_set_desc *desc)
 {
	return sizeof(struct nft_rhash);
 }
@@ -585,8 +585,8 @@ cont:
	}
 }
 
-static unsigned int nft_hash_privsize(const struct nlattr * const nla[],
-				      const struct nft_set_desc *desc)
+static u64 nft_hash_privsize(const struct nlattr * const nla[],
+			     const struct nft_set_desc *desc)
 {
	return sizeof(struct nft_hash) +
	       nft_hash_buckets(desc->size) * sizeof(struct hlist_head);
@@ -411,8 +411,8 @@ static void nft_rbtree_gc(struct work_struct *work)
			   nft_set_gc_interval(set));
 }
 
-static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[],
-					const struct nft_set_desc *desc)
+static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
+			       const struct nft_set_desc *desc)
 {
	return sizeof(struct nft_rbtree);
 }
@@ -82,13 +82,15 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
 	const struct nft_tproxy *priv = nft_expr_priv(expr);
 	struct sk_buff *skb = pkt->skb;
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
-	struct in6_addr taddr = {0};
+	struct in6_addr taddr;
 	int thoff = pkt->xt.thoff;
 	struct udphdr _hdr, *hp;
 	__be16 tport = 0;
 	struct sock *sk;
 	int l4proto;
 
+	memset(&taddr, 0, sizeof(taddr));
+
 	if (!pkt->tprot_set) {
 		regs->verdict.code = NFT_BREAK;
 		return;
@@ -1178,12 +1178,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
 		return NULL;
 
-	/* __GFP_NORETRY is not fully supported by kvmalloc but it should
-	 * work reasonably well if sz is too large and bail out rather
-	 * than shoot all processes down before realizing there is nothing
-	 * more to reclaim.
-	 */
-	info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
+	info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
 	if (!info)
 		return NULL;
@@ -147,7 +147,7 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
 	struct tcf_t tm;
 	int ret;
 
-	spin_lock(&prog->tcf_lock);
+	spin_lock_bh(&prog->tcf_lock);
 	opt.action = prog->tcf_action;
 	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
@@ -164,11 +164,11 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
 			  TCA_ACT_BPF_PAD))
 		goto nla_put_failure;
 
-	spin_unlock(&prog->tcf_lock);
+	spin_unlock_bh(&prog->tcf_lock);
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock(&prog->tcf_lock);
+	spin_unlock_bh(&prog->tcf_lock);
 	nlmsg_trim(skb, tp);
 	return -1;
 }
@@ -340,7 +340,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 
 	prog = to_bpf(*act);
 
-	spin_lock(&prog->tcf_lock);
+	spin_lock_bh(&prog->tcf_lock);
 	if (res != ACT_P_CREATED)
 		tcf_bpf_prog_fill_cfg(prog, &old);
 
@@ -352,7 +352,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 
 	prog->tcf_action = parm->action;
 	rcu_assign_pointer(prog->filter, cfg.filter);
-	spin_unlock(&prog->tcf_lock);
+	spin_unlock_bh(&prog->tcf_lock);
 
 	if (res == ACT_P_CREATED) {
 		tcf_idr_insert(tn, *act);
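Note: this and the following hunks (act_csum, act_gact, act_mirred, act_sample, act_tunnel_key, act_vlan) apply the same mechanical change: tcf_lock is now always taken with bottom halves disabled. The lock is also acquired from softirq context via the rate-estimator path, so a process-context holder using plain spin_lock() can lock up when the softirq fires on the same CPU. A minimal kernel-style sketch of the hazard and the fix; some_lock and both callbacks are illustrative stand-ins:

/* Kernel-style sketch of the deadlock the _bh variants prevent. A
 * timer callback stands in for the rate-estimator path, which runs
 * in softirq (BH) context. */
#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(some_lock);

static void softirq_side(struct timer_list *t)	/* runs in BH context */
{
	spin_lock(&some_lock);		/* plain lock is fine here */
	/* ... read/update estimator state ... */
	spin_unlock(&some_lock);
}

static void process_side(void)
{
	spin_lock_bh(&some_lock);	/* BH off: the timer cannot
					 * interrupt us while we hold it */
	/* ... update action parameters ... */
	spin_unlock_bh(&some_lock);
}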
@@ -96,11 +96,11 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
 	}
 	params_new->update_flags = parm->update_flags;
 
-	spin_lock(&p->tcf_lock);
+	spin_lock_bh(&p->tcf_lock);
 	p->tcf_action = parm->action;
 	rcu_swap_protected(p->params, params_new,
 			   lockdep_is_held(&p->tcf_lock));
-	spin_unlock(&p->tcf_lock);
+	spin_unlock_bh(&p->tcf_lock);
 
 	if (params_new)
 		kfree_rcu(params_new, rcu);
@@ -604,7 +604,7 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 	};
 	struct tcf_t t;
 
-	spin_lock(&p->tcf_lock);
+	spin_lock_bh(&p->tcf_lock);
 	params = rcu_dereference_protected(p->params,
 					   lockdep_is_held(&p->tcf_lock));
 	opt.action = p->tcf_action;
@@ -616,12 +616,12 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 	tcf_tm_dump(&t, &p->tcf_tm);
 	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
 		goto nla_put_failure;
-	spin_unlock(&p->tcf_lock);
+	spin_unlock_bh(&p->tcf_lock);
 
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock(&p->tcf_lock);
+	spin_unlock_bh(&p->tcf_lock);
 	nlmsg_trim(skb, b);
 	return -1;
 }
@@ -113,7 +113,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 
 	gact = to_gact(*a);
 
-	spin_lock(&gact->tcf_lock);
+	spin_lock_bh(&gact->tcf_lock);
 	gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
 	if (p_parm) {
@@ -126,7 +126,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 		gact->tcfg_ptype = p_parm->ptype;
 	}
 #endif
-	spin_unlock(&gact->tcf_lock);
+	spin_unlock_bh(&gact->tcf_lock);
 
 	if (ret == ACT_P_CREATED)
 		tcf_idr_insert(tn, *a);
@@ -183,7 +183,7 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
 	};
 	struct tcf_t t;
 
-	spin_lock(&gact->tcf_lock);
+	spin_lock_bh(&gact->tcf_lock);
 	opt.action = gact->tcf_action;
 	if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
@@ -202,12 +202,12 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
 	tcf_tm_dump(&t, &gact->tcf_tm);
 	if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD))
 		goto nla_put_failure;
-	spin_unlock(&gact->tcf_lock);
+	spin_unlock_bh(&gact->tcf_lock);
 
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock(&gact->tcf_lock);
+	spin_unlock_bh(&gact->tcf_lock);
 	nlmsg_trim(skb, b);
 	return -1;
 }
@@ -551,9 +551,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 					       NULL, NULL);
 		if (err) {
 metadata_parse_err:
-			if (ret == ACT_P_CREATED)
-				tcf_idr_release(*a, bind);
-
 			if (exists)
 				spin_unlock_bh(&ife->tcf_lock);
 			tcf_idr_release(*a, bind);
@@ -574,11 +571,10 @@ metadata_parse_err:
 		 */
 		err = use_all_metadata(ife);
 		if (err) {
-			if (ret == ACT_P_CREATED)
-				tcf_idr_release(*a, bind);
-
 			if (exists)
 				spin_unlock_bh(&ife->tcf_lock);
+			tcf_idr_release(*a, bind);
+
 			kfree(p);
 			return err;
 		}
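Note: the act_ife hunks drop a conditional tcf_idr_release() that ran only when the action had just been created; the same error path then releases the action again unconditionally, so a fresh action was put twice. The fix keeps a single unconditional release after dropping the lock. A userspace toy showing the bug class; names are illustrative, not kernel API:

/* Toy program: dropping the same reference twice on an error path.
 * The assert fires on the underflow, mimicking a refcount splat. */
#include <assert.h>

static int refs = 1;			/* one ref held by the caller */

static void put_ref(void)
{
	refs--;
	assert(refs >= 0);		/* trips on a double put */
}

static void error_path(int just_created)
{
	if (just_created)
		put_ref();		/* BUG: first release */
	put_ref();			/* unconditional second release */
}

int main(void)
{
	error_path(1);			/* assert trips: refcount underflow */
	return 0;
}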
@@ -159,14 +159,14 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	}
 	m = to_mirred(*a);
 
-	spin_lock(&m->tcf_lock);
+	spin_lock_bh(&m->tcf_lock);
 	m->tcf_action = parm->action;
 	m->tcfm_eaction = parm->eaction;
 
 	if (parm->ifindex) {
 		dev = dev_get_by_index(net, parm->ifindex);
 		if (!dev) {
-			spin_unlock(&m->tcf_lock);
+			spin_unlock_bh(&m->tcf_lock);
 			tcf_idr_release(*a, bind);
 			return -ENODEV;
 		}
@@ -177,7 +177,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 		dev_put(dev);
 		m->tcfm_mac_header_xmit = mac_header_xmit;
 	}
-	spin_unlock(&m->tcf_lock);
+	spin_unlock_bh(&m->tcf_lock);
 
 	if (ret == ACT_P_CREATED) {
 		spin_lock(&mirred_list_lock);
@@ -305,7 +305,7 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 	struct net_device *dev;
 	struct tcf_t t;
 
-	spin_lock(&m->tcf_lock);
+	spin_lock_bh(&m->tcf_lock);
 	opt.action = m->tcf_action;
 	opt.eaction = m->tcfm_eaction;
 	dev = tcf_mirred_dev_dereference(m);
@@ -318,12 +318,12 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 	tcf_tm_dump(&t, &m->tcf_tm);
 	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
 		goto nla_put_failure;
-	spin_unlock(&m->tcf_lock);
+	spin_unlock_bh(&m->tcf_lock);
 
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock(&m->tcf_lock);
+	spin_unlock_bh(&m->tcf_lock);
 	nlmsg_trim(skb, b);
 	return -1;
 }
@@ -356,7 +356,7 @@ static int mirred_device_event(struct notifier_block *unused,
 	if (event == NETDEV_UNREGISTER) {
 		spin_lock(&mirred_list_lock);
 		list_for_each_entry(m, &mirred_list, tcfm_list) {
-			spin_lock(&m->tcf_lock);
+			spin_lock_bh(&m->tcf_lock);
 			if (tcf_mirred_dev_dereference(m) == dev) {
 				dev_put(dev);
 				/* Note : no rcu grace period necessary, as
@@ -364,7 +364,7 @@ static int mirred_device_event(struct notifier_block *unused,
 				 */
 				RCU_INIT_POINTER(m->tcfm_dev, NULL);
 			}
-			spin_unlock(&m->tcf_lock);
+			spin_unlock_bh(&m->tcf_lock);
 		}
 		spin_unlock(&mirred_list_lock);
 	}
@@ -44,6 +44,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 	struct nlattr *tb[TCA_SAMPLE_MAX + 1];
 	struct psample_group *psample_group;
 	struct tc_sample *parm;
+	u32 psample_group_num;
 	struct tcf_sample *s;
 	bool exists = false;
 	int ret, err;
@@ -78,25 +79,27 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 		tcf_idr_release(*a, bind);
 		return -EEXIST;
 	}
-	s = to_sample(*a);
 
-	spin_lock(&s->tcf_lock);
-	s->tcf_action = parm->action;
-	s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
-	s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
-	psample_group = psample_group_get(net, s->psample_group_num);
+	psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
+	psample_group = psample_group_get(net, psample_group_num);
 	if (!psample_group) {
-		spin_unlock(&s->tcf_lock);
 		tcf_idr_release(*a, bind);
 		return -ENOMEM;
 	}
+
+	s = to_sample(*a);
+
+	spin_lock_bh(&s->tcf_lock);
+	s->tcf_action = parm->action;
+	s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+	s->psample_group_num = psample_group_num;
 	RCU_INIT_POINTER(s->psample_group, psample_group);
 
 	if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
 		s->truncate = true;
 		s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
 	}
-	spin_unlock(&s->tcf_lock);
+	spin_unlock_bh(&s->tcf_lock);
 
 	if (ret == ACT_P_CREATED)
 		tcf_idr_insert(tn, *a);
@@ -183,7 +186,7 @@ static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
 	};
 	struct tcf_t t;
 
-	spin_lock(&s->tcf_lock);
+	spin_lock_bh(&s->tcf_lock);
 	opt.action = s->tcf_action;
 	if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
@@ -201,12 +204,12 @@ static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
 
 	if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num))
 		goto nla_put_failure;
-	spin_unlock(&s->tcf_lock);
+	spin_unlock_bh(&s->tcf_lock);
 
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock(&s->tcf_lock);
+	spin_unlock_bh(&s->tcf_lock);
 	nlmsg_trim(skb, b);
 	return -1;
 }
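Note: besides the _bh conversion, act_sample's init path is reordered so that psample_group_get(), which can fail, runs before the critical section: the error path no longer has to drop the lock, and only infallible assignments happen while it is held. A kernel-style sketch of the shape of the fix; struct state and get_resource() are hypothetical stand-ins:

/* Kernel-style sketch: acquire anything fallible *before* taking the
 * lock, then do only plain stores under it. */
#include <linux/errno.h>
#include <linux/spinlock.h>

struct state {
	spinlock_t lock;
	void *resource;
	unsigned int num;
};

extern void *get_resource(unsigned int num);	/* hypothetical, may fail */

static int update_state(struct state *s, unsigned int num)
{
	void *res = get_resource(num);

	if (!res)
		return -ENOMEM;		/* nothing to unlock here */

	spin_lock_bh(&s->lock);
	s->num = num;			/* plain stores cannot fail */
	s->resource = res;
	spin_unlock_bh(&s->lock);
	return 0;
}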
@@ -354,11 +354,11 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 	params_new->tcft_action = parm->t_action;
 	params_new->tcft_enc_metadata = metadata;
 
-	spin_lock(&t->tcf_lock);
+	spin_lock_bh(&t->tcf_lock);
 	t->tcf_action = parm->action;
 	rcu_swap_protected(t->params, params_new,
 			   lockdep_is_held(&t->tcf_lock));
-	spin_unlock(&t->tcf_lock);
+	spin_unlock_bh(&t->tcf_lock);
 	if (params_new)
 		kfree_rcu(params_new, rcu);
 
@@ -485,7 +485,7 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
 	};
 	struct tcf_t tm;
 
-	spin_lock(&t->tcf_lock);
+	spin_lock_bh(&t->tcf_lock);
 	params = rcu_dereference_protected(t->params,
 					   lockdep_is_held(&t->tcf_lock));
 	opt.action = t->tcf_action;
@@ -520,12 +520,12 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
 	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
 			  &tm, TCA_TUNNEL_KEY_PAD))
 		goto nla_put_failure;
-	spin_unlock(&t->tcf_lock);
+	spin_unlock_bh(&t->tcf_lock);
 
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock(&t->tcf_lock);
+	spin_unlock_bh(&t->tcf_lock);
 	nlmsg_trim(skb, b);
 	return -1;
 }
@@ -213,10 +213,10 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 	p->tcfv_push_prio = push_prio;
 	p->tcfv_push_proto = push_proto;
 
-	spin_lock(&v->tcf_lock);
+	spin_lock_bh(&v->tcf_lock);
 	v->tcf_action = parm->action;
 	rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
-	spin_unlock(&v->tcf_lock);
+	spin_unlock_bh(&v->tcf_lock);
 
 	if (p)
 		kfree_rcu(p, rcu);
@@ -249,7 +249,7 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
 	};
 	struct tcf_t t;
 
-	spin_lock(&v->tcf_lock);
+	spin_lock_bh(&v->tcf_lock);
 	opt.action = v->tcf_action;
 	p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
 	opt.v_action = p->tcfv_action;
@@ -268,12 +268,12 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
 	tcf_tm_dump(&t, &v->tcf_tm);
 	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
 		goto nla_put_failure;
-	spin_unlock(&v->tcf_lock);
+	spin_unlock_bh(&v->tcf_lock);
 
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock(&v->tcf_lock);
+	spin_unlock_bh(&v->tcf_lock);
 	nlmsg_trim(skb, b);
 	return -1;
 }
@@ -113,6 +113,8 @@ static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 	if (!head)
 		return;
 
+	tcf_unbind_filter(tp, &head->res);
+
 	if (!tc_skip_hw(head->flags))
 		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);
 
@@ -45,6 +45,7 @@
 MODULE_AUTHOR("Mellanox Technologies");
 MODULE_DESCRIPTION("Transport Layer Security Support");
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS_TCP_ULP("tls");
 
 enum {
 	TLSV4,
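Note: MODULE_ALIAS_TCP_ULP("tls") registers a "tcp-ulp-tls"-style module alias so the kernel can demand-load tls.ko when an application selects the ULP by name, instead of failing the setsockopt when the module is not yet loaded. A userspace sketch of how the ULP is requested (error handling abbreviated; TCP_ULP may need linux/tcp.h on older toolchains):

/* Ask the kernel to attach the "tls" upper layer protocol to a
 * connected TCP socket; kTLS key setup would follow on success. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

int enable_tls_ulp(int fd)
{
	if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")) < 0) {
		perror("setsockopt(TCP_ULP)");
		return -1;
	}
	return 0;
}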
@@ -679,8 +679,9 @@ int main(int argc, char **argv)
 		return EXIT_FAIL_OPTION;
 	}
 
-	/* Remove XDP program when program is interrupted */
+	/* Remove XDP program when program is interrupted or killed */
 	signal(SIGINT, int_exit);
+	signal(SIGTERM, int_exit);
 
 	if (bpf_set_link_xdp_fd(ifindex, prog_fd[prog_num], xdp_flags) < 0) {
 		fprintf(stderr, "link set xdp fd failed\n");
@@ -567,8 +567,9 @@ int main(int argc, char **argv)
 		exit(EXIT_FAIL_BPF);
 	}
 
-	/* Remove XDP program when program is interrupted */
+	/* Remove XDP program when program is interrupted or killed */
 	signal(SIGINT, int_exit);
+	signal(SIGTERM, int_exit);
 
 	if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
 		fprintf(stderr, "link set xdp fd failed\n");
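Note: both XDP samples installed their cleanup handler only for SIGINT, so a plain kill (SIGTERM) left the XDP program attached to the interface. A self-contained userspace sketch of the fixed shape; detach_xdp() stands in for the samples' bpf_set_link_xdp_fd(ifindex, -1, xdp_flags) call, and like the samples it calls non-async-signal-safe functions from the handler:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void detach_xdp(void)
{
	/* placeholder for bpf_set_link_xdp_fd(ifindex, -1, xdp_flags) */
	fprintf(stderr, "detaching XDP program\n");
}

static void int_exit(int sig)
{
	(void)sig;
	detach_xdp();
	exit(0);
}

int main(void)
{
	signal(SIGINT, int_exit);	/* Ctrl-C */
	signal(SIGTERM, int_exit);	/* plain `kill` */
	pause();			/* wait for a signal */
	return 0;
}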
@@ -5,6 +5,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+#include "bpf_rlimit.h"
 #include "cgroup_helpers.h"
 
 char bpf_log_buf[BPF_LOG_BUF_SIZE];
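Note: test_cgroup_storage was failing because BPF maps and programs are charged against RLIMIT_MEMLOCK; including bpf_rlimit.h lifts that limit before main() runs. Roughly what the header does, as a sketch:

/* Approximate userspace equivalent of the selftests' bpf_rlimit.h:
 * raise RLIMIT_MEMLOCK before any BPF syscalls, since maps and
 * programs are charged against locked memory. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>

static void bump_memlock_rlimit(void)
{
	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		exit(1);
	}
}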