bpf-for-netdev
-----BEGIN PGP SIGNATURE-----
iHUEABYIAB0WIQTFp0I1jqZrAX+hPRXbK58LschIgwUCZVz1PAAKCRDbK58LschI
g/RHAQCid/nEX6phIBKnsxzPSsJv4/W9WsXuqw5OfN9eUW/kUAEAoC4M1trPgUZG
/nzFP3BLOZR2jqZaVwL/xu/H5fp0DwA=
=nNka
-----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2023-11-21

We've added 19 non-merge commits during the last 4 day(s) which contain
a total of 18 files changed, 1043 insertions(+), 416 deletions(-).

The main changes are:

1) Fix BPF verifier to validate callbacks as if they are called an
   unknown number of times in order to fix not detecting some unsafe
   programs, from Eduard Zingerman.

2) Fix bpf_redirect_peer() handling which missed proper stats accounting
   for veth and netkit and also generally fix missing stats for the
   latter, from Peilin Ye, Daniel Borkmann et al.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: check if max number of bpf_loop iterations is tracked
  bpf: keep track of max number of bpf_loop callback iterations
  selftests/bpf: test widening for iterating callbacks
  bpf: widening for callback iterators
  selftests/bpf: tests for iterating callbacks
  bpf: verify callbacks as if they are called unknown number of times
  bpf: extract setup_func_entry() utility function
  bpf: extract __check_reg_arg() utility function
  selftests/bpf: fix bpf_loop_bench for new callback verification scheme
  selftests/bpf: track string payload offset as scalar in strobemeta
  selftests/bpf: track tcp payload offset as scalar in xdp_synproxy
  selftests/bpf: Add netkit to tc_redirect selftest
  selftests/bpf: De-veth-ize the tc_redirect test case
  bpf, netkit: Add indirect call wrapper for fetching peer dev
  bpf: Fix dev's rx stats for bpf_redirect_peer traffic
  veth: Use tstats per-CPU traffic counters
  netkit: Add tstats per-CPU traffic counters
  net: Move {l,t,d}stats allocation to core and convert veth & vrf
  net, vrf: Move dstats structure to core
====================

Link: https://lore.kernel.org/r/20231121193113.11796-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in: commit b2d66643dc
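For context, the verifier portion of this series concerns helpers such as bpf_loop() that invoke a BPF callback a bounded but statically unknown number of times. Below is a minimal sketch of such a program; it is illustrative only, not part of this commit, and the program and section names are assumptions.

/* Illustrative sketch, not part of this commit: a bpf_loop() callback
 * that mutates its context. The verifier now simulates the callback as
 * if it may run any number of times (up to the requested bound), so an
 * effect like lc->i++ is no longer assumed to happen exactly once.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct loop_ctx {
	int i;
};

static int loop_cb(__u32 idx, void *data)
{
	struct loop_ctx *lc = data;

	lc->i++;	/* visible to the caller across iterations */
	return 0;	/* 0 means: continue looping */
}

SEC("tc")
int count_iterations(struct __sk_buff *skb)
{
	struct loop_ctx lc = { .i = 0 };

	bpf_loop(100, loop_cb, &lc, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";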
@@ -7,6 +7,7 @@
#include <linux/filter.h>
#include <linux/netfilter_netdev.h>
#include <linux/bpf_mprog.h>
#include <linux/indirect_call_wrapper.h>

#include <net/netkit.h>
#include <net/dst.h>

@@ -68,6 +69,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_t ret_dev = NET_XMIT_SUCCESS;
const struct bpf_mprog_entry *entry;
struct net_device *peer;
int len = skb->len;

rcu_read_lock();
peer = rcu_dereference(nk->peer);

@@ -85,15 +87,22 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
case NETKIT_PASS:
skb->protocol = eth_type_trans(skb, skb->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
__netif_rx(skb);
if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
dev_sw_netstats_tx_add(dev, 1, len);
dev_sw_netstats_rx_add(peer, len);
} else {
goto drop_stats;
}
break;
case NETKIT_REDIRECT:
dev_sw_netstats_tx_add(dev, 1, len);
skb_do_redirect(skb);
break;
case NETKIT_DROP:
default:
drop:
kfree_skb(skb);
drop_stats:
dev_core_stats_tx_dropped_inc(dev);
ret_dev = NET_XMIT_DROP;
break;

@@ -169,11 +178,18 @@ out:
rcu_read_unlock();
}

static struct net_device *netkit_peer_dev(struct net_device *dev)
INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev)
{
return rcu_dereference(netkit_priv(dev)->peer);
}

static void netkit_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
dev_fetch_sw_netstats(stats, dev->tstats);
stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
}

static void netkit_uninit(struct net_device *dev);

static const struct net_device_ops netkit_netdev_ops = {

@@ -184,6 +200,7 @@ static const struct net_device_ops netkit_netdev_ops = {
.ndo_set_rx_headroom = netkit_set_headroom,
.ndo_get_iflink = netkit_get_iflink,
.ndo_get_peer_dev = netkit_peer_dev,
.ndo_get_stats64 = netkit_get_stats,
.ndo_uninit = netkit_uninit,
.ndo_features_check = passthru_features_check,
};

@@ -218,6 +235,7 @@ static void netkit_setup(struct net_device *dev)

ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

dev->flags |= IFF_NOARP;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
@@ -373,7 +373,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
if (!use_napi)
dev_lstats_add(dev, length);
dev_sw_netstats_tx_add(dev, 1, length);
else
__veth_xdp_flush(rq);
} else {

@@ -387,14 +387,6 @@ drop:
return ret;
}

static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
struct veth_priv *priv = netdev_priv(dev);

dev_lstats_read(dev, packets, bytes);
return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);

@@ -432,24 +424,24 @@ static void veth_get_stats64(struct net_device *dev,
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
struct veth_stats rx;
u64 packets, bytes;

tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
tot->tx_bytes = bytes;
tot->tx_packets = packets;
tot->tx_dropped = atomic64_read(&priv->dropped);
dev_fetch_sw_netstats(tot, dev->tstats);

veth_stats_rx(&rx, dev);
tot->tx_dropped += rx.xdp_tx_err;
tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
tot->rx_bytes = rx.xdp_bytes;
tot->rx_packets = rx.xdp_packets;
tot->rx_bytes += rx.xdp_bytes;
tot->rx_packets += rx.xdp_packets;

rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
veth_stats_tx(peer, &packets, &bytes);
tot->rx_bytes += bytes;
tot->rx_packets += packets;
struct rtnl_link_stats64 tot_peer = {};

dev_fetch_sw_netstats(&tot_peer, peer->tstats);
tot->rx_bytes += tot_peer.tx_bytes;
tot->rx_packets += tot_peer.tx_packets;

veth_stats_rx(&rx, peer);
tot->tx_dropped += rx.peer_tq_xdp_xmit_err;

@@ -1506,25 +1498,12 @@ static void veth_free_queues(struct net_device *dev)

static int veth_dev_init(struct net_device *dev)
{
int err;

dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;

err = veth_alloc_queues(dev);
if (err) {
free_percpu(dev->lstats);
return err;
}

return 0;
return veth_alloc_queues(dev);
}

static void veth_dev_free(struct net_device *dev)
{
veth_free_queues(dev);
free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER

@@ -1796,6 +1775,7 @@ static void veth_setup(struct net_device *dev)
NETIF_F_HW_VLAN_STAG_RX);
dev->needs_free_netdev = true;
dev->priv_destructor = veth_dev_free;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->max_mtu = ETH_MAX_MTU;

dev->hw_features = VETH_FEATURES;
@@ -121,22 +121,12 @@ struct net_vrf {
int ifindex;
};

struct pcpu_dstats {
u64 tx_pkts;
u64 tx_bytes;
u64 tx_drps;
u64 rx_pkts;
u64 rx_bytes;
u64 rx_drps;
struct u64_stats_sync syncp;
};

static void vrf_rx_stats(struct net_device *dev, int len)
{
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

u64_stats_update_begin(&dstats->syncp);
dstats->rx_pkts++;
dstats->rx_packets++;
dstats->rx_bytes += len;
u64_stats_update_end(&dstats->syncp);
}

@@ -161,10 +151,10 @@ static void vrf_get_stats64(struct net_device *dev,
do {
start = u64_stats_fetch_begin(&dstats->syncp);
tbytes = dstats->tx_bytes;
tpkts = dstats->tx_pkts;
tdrops = dstats->tx_drps;
tpkts = dstats->tx_packets;
tdrops = dstats->tx_drops;
rbytes = dstats->rx_bytes;
rpkts = dstats->rx_pkts;
rpkts = dstats->rx_packets;
} while (u64_stats_fetch_retry(&dstats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpkts;

@@ -421,7 +411,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
vrf_rx_stats(dev, len);
else
this_cpu_inc(dev->dstats->rx_drps);
this_cpu_inc(dev->dstats->rx_drops);

return NETDEV_TX_OK;
}

@@ -616,11 +606,11 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

u64_stats_update_begin(&dstats->syncp);
dstats->tx_pkts++;
dstats->tx_packets++;
dstats->tx_bytes += len;
u64_stats_update_end(&dstats->syncp);
} else {
this_cpu_inc(dev->dstats->tx_drps);
this_cpu_inc(dev->dstats->tx_drops);
}

return ret;

@@ -1174,22 +1164,15 @@ static void vrf_dev_uninit(struct net_device *dev)

vrf_rtable_release(dev, vrf);
vrf_rt6_release(dev, vrf);

free_percpu(dev->dstats);
dev->dstats = NULL;
}

static int vrf_dev_init(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);

dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
if (!dev->dstats)
goto out_nomem;

/* create the default dst which points back to us */
if (vrf_rtable_create(dev) != 0)
goto out_stats;
goto out_nomem;

if (vrf_rt6_create(dev) != 0)
goto out_rth;

@@ -1203,9 +1186,6 @@ static int vrf_dev_init(struct net_device *dev)

out_rth:
vrf_rtable_release(dev, vrf);
out_stats:
free_percpu(dev->dstats);
dev->dstats = NULL;
out_nomem:
return -ENOMEM;
}

@@ -1704,6 +1684,8 @@ static void vrf_setup(struct net_device *dev)
dev->min_mtu = IPV6_MIN_MTU;
dev->max_mtu = IP6_MAX_MTU;
dev->mtu = dev->max_mtu;

dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
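The vrf hunks above split the per-CPU counter updates and the reader loop across several functions; condensed into one place, the u64_stats_sync pattern they follow looks roughly like the sketch below. The function names are assumptions, not code from this commit.

/* Condensed, illustrative sketch of the per-CPU dstats pattern used
 * above; function names are assumptions, not code from this commit.
 */
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

static void example_tx_account(struct net_device *dev, unsigned int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);	/* writer side, this CPU only */
	dstats->tx_packets++;
	dstats->tx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void example_read_one_cpu(const struct pcpu_dstats *dstats,
				 u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {	/* reader retries if a writer was active meanwhile */
		start = u64_stats_fetch_begin(&dstats->syncp);
		*packets = dstats->tx_packets;
		*bytes = dstats->tx_bytes;
	} while (u64_stats_fetch_retry(&dstats->syncp, start));
}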
@@ -301,6 +301,17 @@ struct bpf_func_state {
struct tnum callback_ret_range;
bool in_async_callback_fn;
bool in_exception_callback_fn;
/* For callback calling functions that limit number of possible
* callback executions (e.g. bpf_loop) keeps track of current
* simulated iteration number.
* Value in frame N refers to number of times callback with frame
* N+1 was simulated, e.g. for the following call:
*
* bpf_loop(..., fn, ...); | suppose current frame is N
*                         | fn would be simulated in frame N+1
*                         | number of simulations is tracked in frame N
*/
u32 callback_depth;

/* The following fields should be last. See copy_func_state() */
int acquired_refs;

@@ -400,6 +411,7 @@ struct bpf_verifier_state {
struct bpf_idx_pair *jmp_history;
u32 jmp_history_cnt;
u32 dfs_depth;
u32 callback_unroll_depth;
};

#define bpf_get_spilled_reg(slot, frame, mask) \

@@ -511,6 +523,10 @@ struct bpf_insn_aux_data {
* this instruction, regardless of any heuristics
*/
bool force_checkpoint;
/* true if instruction is a call to a helper function that
* accepts callback function as a parameter.
*/
bool calls_callback;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -1797,6 +1797,13 @@ enum netdev_ml_priv_type {
ML_PRIV_CAN,
};

enum netdev_stat_type {
NETDEV_PCPU_STAT_NONE,
NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
};

/**
* struct net_device - The DEVICE structure.
*

@@ -1991,10 +1998,14 @@ enum netdev_ml_priv_type {
*
* @ml_priv: Mid-layer private
* @ml_priv_type: Mid-layer private type
* @lstats: Loopback statistics
* @tstats: Tunnel statistics
* @dstats: Dummy statistics
* @vstats: Virtual ethernet statistics
*
* @pcpu_stat_type: Type of device statistics which the core should
* allocate/free: none, lstats, tstats, dstats. none
* means the driver is handling statistics allocation/
* freeing internally.
* @lstats: Loopback statistics: packets, bytes
* @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes
* @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes
*
* @garp_port: GARP
* @mrp_port: MRP

@@ -2354,6 +2365,7 @@ struct net_device {
void *ml_priv;
enum netdev_ml_priv_type ml_priv_type;

enum netdev_stat_type pcpu_stat_type:8;
union {
struct pcpu_lstats __percpu *lstats;
struct pcpu_sw_netstats __percpu *tstats;

@@ -2755,6 +2767,16 @@ struct pcpu_sw_netstats {
struct u64_stats_sync syncp;
} __aligned(4 * sizeof(u64));

struct pcpu_dstats {
u64 rx_packets;
u64 rx_bytes;
u64 rx_drops;
u64 tx_packets;
u64 tx_bytes;
u64 tx_drops;
struct u64_stats_sync syncp;
} __aligned(8 * sizeof(u64));

struct pcpu_lstats {
u64_stats_t packets;
u64_stats_t bytes;
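With the additions above, a driver can hand per-CPU stats allocation to the core; a hedged sketch of what the driver side then reduces to is shown below. The driver is hypothetical and not part of this commit.

/* Hypothetical driver sketch: selecting NETDEV_PCPU_STAT_TSTATS makes
 * register_netdevice() allocate dev->tstats and the core free it, so
 * the driver only updates and aggregates the counters.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;

	/* ... hand the skb to the peer/hardware ... */
	dev_sw_netstats_tx_add(dev, 1, len);
	return NETDEV_TX_OK;
}

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	dev_fetch_sw_netstats(stats, dev->tstats);
}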
@@ -10,6 +10,7 @@ int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev));
#else
static inline int netkit_prog_attach(const union bpf_attr *attr,
struct bpf_prog *prog)

@@ -34,5 +35,10 @@ static inline int netkit_prog_query(const union bpf_attr *attr,
{
return -EINVAL;
}

static inline struct net_device *netkit_peer_dev(struct net_device *dev)
{
return NULL;
}
#endif /* CONFIG_NETKIT */
#endif /* __NET_NETKIT_H */
@ -547,13 +547,12 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
|
||||
return func_id == BPF_FUNC_dynptr_data;
|
||||
}
|
||||
|
||||
static bool is_callback_calling_kfunc(u32 btf_id);
|
||||
static bool is_sync_callback_calling_kfunc(u32 btf_id);
|
||||
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
|
||||
|
||||
static bool is_callback_calling_function(enum bpf_func_id func_id)
|
||||
static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
|
||||
{
|
||||
return func_id == BPF_FUNC_for_each_map_elem ||
|
||||
func_id == BPF_FUNC_timer_set_callback ||
|
||||
func_id == BPF_FUNC_find_vma ||
|
||||
func_id == BPF_FUNC_loop ||
|
||||
func_id == BPF_FUNC_user_ringbuf_drain;
|
||||
@ -564,6 +563,18 @@ static bool is_async_callback_calling_function(enum bpf_func_id func_id)
|
||||
return func_id == BPF_FUNC_timer_set_callback;
|
||||
}
|
||||
|
||||
static bool is_callback_calling_function(enum bpf_func_id func_id)
|
||||
{
|
||||
return is_sync_callback_calling_function(func_id) ||
|
||||
is_async_callback_calling_function(func_id);
|
||||
}
|
||||
|
||||
static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
|
||||
{
|
||||
return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) ||
|
||||
(bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm));
|
||||
}
|
||||
|
||||
static bool is_storage_get_function(enum bpf_func_id func_id)
|
||||
{
|
||||
return func_id == BPF_FUNC_sk_storage_get ||
|
||||
@ -1808,6 +1819,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
|
||||
dst_state->first_insn_idx = src->first_insn_idx;
|
||||
dst_state->last_insn_idx = src->last_insn_idx;
|
||||
dst_state->dfs_depth = src->dfs_depth;
|
||||
dst_state->callback_unroll_depth = src->callback_unroll_depth;
|
||||
dst_state->used_as_loop_entry = src->used_as_loop_entry;
|
||||
for (i = 0; i <= src->curframe; i++) {
|
||||
dst = dst_state->frame[i];
|
||||
@ -3439,13 +3451,11 @@ static void mark_insn_zext(struct bpf_verifier_env *env,
|
||||
reg->subreg_def = DEF_NOT_SUBREG;
|
||||
}
|
||||
|
||||
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
|
||||
enum reg_arg_type t)
|
||||
static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno,
|
||||
enum reg_arg_type t)
|
||||
{
|
||||
struct bpf_verifier_state *vstate = env->cur_state;
|
||||
struct bpf_func_state *state = vstate->frame[vstate->curframe];
|
||||
struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
|
||||
struct bpf_reg_state *reg, *regs = state->regs;
|
||||
struct bpf_reg_state *reg;
|
||||
bool rw64;
|
||||
|
||||
if (regno >= MAX_BPF_REG) {
|
||||
@ -3486,6 +3496,15 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
|
||||
enum reg_arg_type t)
|
||||
{
|
||||
struct bpf_verifier_state *vstate = env->cur_state;
|
||||
struct bpf_func_state *state = vstate->frame[vstate->curframe];
|
||||
|
||||
return __check_reg_arg(env, state->regs, regno, t);
|
||||
}
|
||||
|
||||
static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
|
||||
{
|
||||
env->insn_aux_data[idx].jmp_point = true;
|
||||
@ -3724,6 +3743,8 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
|
||||
}
|
||||
}
|
||||
|
||||
static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
|
||||
|
||||
/* For given verifier state backtrack_insn() is called from the last insn to
|
||||
* the first insn. Its purpose is to compute a bitmask of registers and
|
||||
* stack slots that needs precision in the parent verifier state.
|
||||
@ -3899,16 +3920,13 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
} else if ((bpf_helper_call(insn) &&
|
||||
is_callback_calling_function(insn->imm) &&
|
||||
!is_async_callback_calling_function(insn->imm)) ||
|
||||
(bpf_pseudo_kfunc_call(insn) && is_callback_calling_kfunc(insn->imm))) {
|
||||
/* callback-calling helper or kfunc call, which means
|
||||
* we are exiting from subprog, but unlike the subprog
|
||||
* call handling above, we shouldn't propagate
|
||||
* precision of r1-r5 (if any requested), as they are
|
||||
* not actually arguments passed directly to callback
|
||||
* subprogs
|
||||
} else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) {
|
||||
/* exit from callback subprog to callback-calling helper or
|
||||
* kfunc call. Use idx/subseq_idx check to discern it from
|
||||
* straight line code backtracking.
|
||||
* Unlike the subprog call handling above, we shouldn't
|
||||
* propagate precision of r1-r5 (if any requested), as they are
|
||||
* not actually arguments passed directly to callback subprogs
|
||||
*/
|
||||
if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
|
||||
verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
|
||||
@ -3943,10 +3961,18 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
|
||||
} else if (opcode == BPF_EXIT) {
|
||||
bool r0_precise;
|
||||
|
||||
/* Backtracking to a nested function call, 'idx' is a part of
|
||||
* the inner frame 'subseq_idx' is a part of the outer frame.
|
||||
* In case of a regular function call, instructions giving
|
||||
* precision to registers R1-R5 should have been found already.
|
||||
* In case of a callback, it is ok to have R1-R5 marked for
|
||||
* backtracking, as these registers are set by the function
|
||||
* invoking callback.
|
||||
*/
|
||||
if (subseq_idx >= 0 && calls_callback(env, subseq_idx))
|
||||
for (i = BPF_REG_1; i <= BPF_REG_5; i++)
|
||||
bt_clear_reg(bt, i);
|
||||
if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
|
||||
/* if backtracing was looking for registers R1-R5
|
||||
* they should have been found already.
|
||||
*/
|
||||
verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
|
||||
WARN_ONCE(1, "verifier backtracking bug");
|
||||
return -EFAULT;
|
||||
@ -9350,7 +9376,7 @@ static void clear_caller_saved_regs(struct bpf_verifier_env *env,
|
||||
/* after the call registers r0 - r5 were scratched */
|
||||
for (i = 0; i < CALLER_SAVED_REGS; i++) {
|
||||
mark_reg_not_init(env, regs, caller_saved[i]);
|
||||
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
|
||||
__check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK);
|
||||
}
|
||||
}
|
||||
|
||||
@ -9363,11 +9389,10 @@ static int set_callee_state(struct bpf_verifier_env *env,
|
||||
struct bpf_func_state *caller,
|
||||
struct bpf_func_state *callee, int insn_idx);
|
||||
|
||||
static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
int *insn_idx, int subprog,
|
||||
set_callee_state_fn set_callee_state_cb)
|
||||
static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite,
|
||||
set_callee_state_fn set_callee_state_cb,
|
||||
struct bpf_verifier_state *state)
|
||||
{
|
||||
struct bpf_verifier_state *state = env->cur_state;
|
||||
struct bpf_func_state *caller, *callee;
|
||||
int err;
|
||||
|
||||
@ -9377,82 +9402,13 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
return -E2BIG;
|
||||
}
|
||||
|
||||
caller = state->frame[state->curframe];
|
||||
if (state->frame[state->curframe + 1]) {
|
||||
verbose(env, "verifier bug. Frame %d already allocated\n",
|
||||
state->curframe + 1);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
err = btf_check_subprog_call(env, subprog, caller->regs);
|
||||
if (err == -EFAULT)
|
||||
return err;
|
||||
if (subprog_is_global(env, subprog)) {
|
||||
if (err) {
|
||||
verbose(env, "Caller passes invalid args into func#%d\n",
|
||||
subprog);
|
||||
return err;
|
||||
} else {
|
||||
if (env->log.level & BPF_LOG_LEVEL)
|
||||
verbose(env,
|
||||
"Func#%d is global and valid. Skipping.\n",
|
||||
subprog);
|
||||
clear_caller_saved_regs(env, caller->regs);
|
||||
|
||||
/* All global functions return a 64-bit SCALAR_VALUE */
|
||||
mark_reg_unknown(env, caller->regs, BPF_REG_0);
|
||||
caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
|
||||
|
||||
/* continue with next insn after call */
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* set_callee_state is used for direct subprog calls, but we are
|
||||
* interested in validating only BPF helpers that can call subprogs as
|
||||
* callbacks
|
||||
*/
|
||||
if (set_callee_state_cb != set_callee_state) {
|
||||
env->subprog_info[subprog].is_cb = true;
|
||||
if (bpf_pseudo_kfunc_call(insn) &&
|
||||
!is_callback_calling_kfunc(insn->imm)) {
|
||||
verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
|
||||
func_id_name(insn->imm), insn->imm);
|
||||
return -EFAULT;
|
||||
} else if (!bpf_pseudo_kfunc_call(insn) &&
|
||||
!is_callback_calling_function(insn->imm)) { /* helper */
|
||||
verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
|
||||
func_id_name(insn->imm), insn->imm);
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
|
||||
if (insn->code == (BPF_JMP | BPF_CALL) &&
|
||||
insn->src_reg == 0 &&
|
||||
insn->imm == BPF_FUNC_timer_set_callback) {
|
||||
struct bpf_verifier_state *async_cb;
|
||||
|
||||
/* there is no real recursion here. timer callbacks are async */
|
||||
env->subprog_info[subprog].is_async_cb = true;
|
||||
async_cb = push_async_cb(env, env->subprog_info[subprog].start,
|
||||
*insn_idx, subprog);
|
||||
if (!async_cb)
|
||||
return -EFAULT;
|
||||
callee = async_cb->frame[0];
|
||||
callee->async_entry_cnt = caller->async_entry_cnt + 1;
|
||||
|
||||
/* Convert bpf_timer_set_callback() args into timer callback args */
|
||||
err = set_callee_state_cb(env, caller, callee, *insn_idx);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
clear_caller_saved_regs(env, caller->regs);
|
||||
mark_reg_unknown(env, caller->regs, BPF_REG_0);
|
||||
caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
|
||||
/* continue with next insn after call */
|
||||
return 0;
|
||||
}
|
||||
|
||||
caller = state->frame[state->curframe];
|
||||
callee = kzalloc(sizeof(*callee), GFP_KERNEL);
|
||||
if (!callee)
|
||||
return -ENOMEM;
|
||||
@ -9464,24 +9420,141 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
*/
|
||||
init_func_state(env, callee,
|
||||
/* remember the callsite, it will be used by bpf_exit */
|
||||
*insn_idx /* callsite */,
|
||||
callsite,
|
||||
state->curframe + 1 /* frameno within this callchain */,
|
||||
subprog /* subprog number within this prog */);
|
||||
|
||||
/* Transfer references to the callee */
|
||||
err = copy_reference_state(callee, caller);
|
||||
err = err ?: set_callee_state_cb(env, caller, callee, callsite);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
err = set_callee_state_cb(env, caller, callee, *insn_idx);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
clear_caller_saved_regs(env, caller->regs);
|
||||
|
||||
/* only increment it after check_reg_arg() finished */
|
||||
state->curframe++;
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
free_func_state(callee);
|
||||
state->frame[state->curframe + 1] = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
int insn_idx, int subprog,
|
||||
set_callee_state_fn set_callee_state_cb)
|
||||
{
|
||||
struct bpf_verifier_state *state = env->cur_state, *callback_state;
|
||||
struct bpf_func_state *caller, *callee;
|
||||
int err;
|
||||
|
||||
caller = state->frame[state->curframe];
|
||||
err = btf_check_subprog_call(env, subprog, caller->regs);
|
||||
if (err == -EFAULT)
|
||||
return err;
|
||||
|
||||
/* set_callee_state is used for direct subprog calls, but we are
|
||||
* interested in validating only BPF helpers that can call subprogs as
|
||||
* callbacks
|
||||
*/
|
||||
env->subprog_info[subprog].is_cb = true;
|
||||
if (bpf_pseudo_kfunc_call(insn) &&
|
||||
!is_sync_callback_calling_kfunc(insn->imm)) {
|
||||
verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
|
||||
func_id_name(insn->imm), insn->imm);
|
||||
return -EFAULT;
|
||||
} else if (!bpf_pseudo_kfunc_call(insn) &&
|
||||
!is_callback_calling_function(insn->imm)) { /* helper */
|
||||
verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
|
||||
func_id_name(insn->imm), insn->imm);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (insn->code == (BPF_JMP | BPF_CALL) &&
|
||||
insn->src_reg == 0 &&
|
||||
insn->imm == BPF_FUNC_timer_set_callback) {
|
||||
struct bpf_verifier_state *async_cb;
|
||||
|
||||
/* there is no real recursion here. timer callbacks are async */
|
||||
env->subprog_info[subprog].is_async_cb = true;
|
||||
async_cb = push_async_cb(env, env->subprog_info[subprog].start,
|
||||
insn_idx, subprog);
|
||||
if (!async_cb)
|
||||
return -EFAULT;
|
||||
callee = async_cb->frame[0];
|
||||
callee->async_entry_cnt = caller->async_entry_cnt + 1;
|
||||
|
||||
/* Convert bpf_timer_set_callback() args into timer callback args */
|
||||
err = set_callee_state_cb(env, caller, callee, insn_idx);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* for callback functions enqueue entry to callback and
|
||||
* proceed with next instruction within current frame.
|
||||
*/
|
||||
callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false);
|
||||
if (!callback_state)
|
||||
return -ENOMEM;
|
||||
|
||||
err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb,
|
||||
callback_state);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
callback_state->callback_unroll_depth++;
|
||||
callback_state->frame[callback_state->curframe - 1]->callback_depth++;
|
||||
caller->callback_depth = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
int *insn_idx)
|
||||
{
|
||||
struct bpf_verifier_state *state = env->cur_state;
|
||||
struct bpf_func_state *caller;
|
||||
int err, subprog, target_insn;
|
||||
|
||||
target_insn = *insn_idx + insn->imm + 1;
|
||||
subprog = find_subprog(env, target_insn);
|
||||
if (subprog < 0) {
|
||||
verbose(env, "verifier bug. No program starts at insn %d\n", target_insn);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
caller = state->frame[state->curframe];
|
||||
err = btf_check_subprog_call(env, subprog, caller->regs);
|
||||
if (err == -EFAULT)
|
||||
return err;
|
||||
if (subprog_is_global(env, subprog)) {
|
||||
if (err) {
|
||||
verbose(env, "Caller passes invalid args into func#%d\n", subprog);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (env->log.level & BPF_LOG_LEVEL)
|
||||
verbose(env, "Func#%d is global and valid. Skipping.\n", subprog);
|
||||
clear_caller_saved_regs(env, caller->regs);
|
||||
|
||||
/* All global functions return a 64-bit SCALAR_VALUE */
|
||||
mark_reg_unknown(env, caller->regs, BPF_REG_0);
|
||||
caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
|
||||
|
||||
/* continue with next insn after call */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* for regular function entry setup new frame and continue
|
||||
* from that frame.
|
||||
*/
|
||||
err = setup_func_entry(env, subprog, *insn_idx, set_callee_state, state);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
clear_caller_saved_regs(env, caller->regs);
|
||||
|
||||
/* and go analyze first insn of the callee */
|
||||
*insn_idx = env->subprog_info[subprog].start - 1;
|
||||
|
||||
@ -9489,14 +9562,10 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
verbose(env, "caller:\n");
|
||||
print_verifier_state(env, caller, true);
|
||||
verbose(env, "callee:\n");
|
||||
print_verifier_state(env, callee, true);
|
||||
print_verifier_state(env, state->frame[state->curframe], true);
|
||||
}
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
free_func_state(callee);
|
||||
state->frame[state->curframe + 1] = NULL;
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int map_set_for_each_callback_args(struct bpf_verifier_env *env,
|
||||
@ -9540,22 +9609,6 @@ static int set_callee_state(struct bpf_verifier_env *env,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
int *insn_idx)
|
||||
{
|
||||
int subprog, target_insn;
|
||||
|
||||
target_insn = *insn_idx + insn->imm + 1;
|
||||
subprog = find_subprog(env, target_insn);
|
||||
if (subprog < 0) {
|
||||
verbose(env, "verifier bug. No program starts at insn %d\n",
|
||||
target_insn);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
|
||||
}
|
||||
|
||||
static int set_map_elem_callback_state(struct bpf_verifier_env *env,
|
||||
struct bpf_func_state *caller,
|
||||
struct bpf_func_state *callee,
|
||||
@ -9748,9 +9801,10 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
|
||||
|
||||
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
|
||||
{
|
||||
struct bpf_verifier_state *state = env->cur_state;
|
||||
struct bpf_verifier_state *state = env->cur_state, *prev_st;
|
||||
struct bpf_func_state *caller, *callee;
|
||||
struct bpf_reg_state *r0;
|
||||
bool in_callback_fn;
|
||||
int err;
|
||||
|
||||
callee = state->frame[state->curframe];
|
||||
@ -9779,6 +9833,11 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
|
||||
verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!calls_callback(env, callee->callsite)) {
|
||||
verbose(env, "BUG: in callback at %d, callsite %d !calls_callback\n",
|
||||
*insn_idx, callee->callsite);
|
||||
return -EFAULT;
|
||||
}
|
||||
} else {
|
||||
/* return to the caller whatever r0 had in the callee */
|
||||
caller->regs[BPF_REG_0] = *r0;
|
||||
@ -9796,7 +9855,16 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
|
||||
return err;
|
||||
}
|
||||
|
||||
*insn_idx = callee->callsite + 1;
|
||||
/* for callbacks like bpf_loop or bpf_for_each_map_elem go back to callsite,
|
||||
* there function call logic would reschedule callback visit. If iteration
|
||||
* converges is_state_visited() would prune that visit eventually.
|
||||
*/
|
||||
in_callback_fn = callee->in_callback_fn;
|
||||
if (in_callback_fn)
|
||||
*insn_idx = callee->callsite;
|
||||
else
|
||||
*insn_idx = callee->callsite + 1;
|
||||
|
||||
if (env->log.level & BPF_LOG_LEVEL) {
|
||||
verbose(env, "returning from callee:\n");
|
||||
print_verifier_state(env, callee, true);
|
||||
@ -9807,6 +9875,24 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
|
||||
* bpf_throw, this will be done by copy_verifier_state for extra frames. */
|
||||
free_func_state(callee);
|
||||
state->frame[state->curframe--] = NULL;
|
||||
|
||||
/* for callbacks widen imprecise scalars to make programs like below verify:
|
||||
*
|
||||
* struct ctx { int i; }
|
||||
* void cb(int idx, struct ctx *ctx) { ctx->i++; ... }
|
||||
* ...
|
||||
* struct ctx = { .i = 0; }
|
||||
* bpf_loop(100, cb, &ctx, 0);
|
||||
*
|
||||
* This is similar to what is done in process_iter_next_call() for open
|
||||
* coded iterators.
|
||||
*/
|
||||
prev_st = in_callback_fn ? find_prev_entry(env, state, *insn_idx) : NULL;
|
||||
if (prev_st) {
|
||||
err = widen_imprecise_scalars(env, prev_st, state);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -10209,24 +10295,37 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
}
|
||||
break;
|
||||
case BPF_FUNC_for_each_map_elem:
|
||||
err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
|
||||
set_map_elem_callback_state);
|
||||
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
|
||||
set_map_elem_callback_state);
|
||||
break;
|
||||
case BPF_FUNC_timer_set_callback:
|
||||
err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
|
||||
set_timer_callback_state);
|
||||
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
|
||||
set_timer_callback_state);
|
||||
break;
|
||||
case BPF_FUNC_find_vma:
|
||||
err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
|
||||
set_find_vma_callback_state);
|
||||
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
|
||||
set_find_vma_callback_state);
|
||||
break;
|
||||
case BPF_FUNC_snprintf:
|
||||
err = check_bpf_snprintf_call(env, regs);
|
||||
break;
|
||||
case BPF_FUNC_loop:
|
||||
update_loop_inline_state(env, meta.subprogno);
|
||||
err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
|
||||
set_loop_callback_state);
|
||||
/* Verifier relies on R1 value to determine if bpf_loop() iteration
|
||||
* is finished, thus mark it precise.
|
||||
*/
|
||||
err = mark_chain_precision(env, BPF_REG_1);
|
||||
if (err)
|
||||
return err;
|
||||
if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) {
|
||||
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
|
||||
set_loop_callback_state);
|
||||
} else {
|
||||
cur_func(env)->callback_depth = 0;
|
||||
if (env->log.level & BPF_LOG_LEVEL2)
|
||||
verbose(env, "frame%d bpf_loop iteration limit reached\n",
|
||||
env->cur_state->curframe);
|
||||
}
|
||||
break;
|
||||
case BPF_FUNC_dynptr_from_mem:
|
||||
if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
|
||||
@ -10322,8 +10421,8 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
break;
|
||||
}
|
||||
case BPF_FUNC_user_ringbuf_drain:
|
||||
err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
|
||||
set_user_ringbuf_callback_state);
|
||||
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
|
||||
set_user_ringbuf_callback_state);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -11211,7 +11310,7 @@ static bool is_bpf_graph_api_kfunc(u32 btf_id)
|
||||
btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
|
||||
}
|
||||
|
||||
static bool is_callback_calling_kfunc(u32 btf_id)
|
||||
static bool is_sync_callback_calling_kfunc(u32 btf_id)
|
||||
{
|
||||
return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
|
||||
}
|
||||
@ -11963,6 +12062,21 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
/* Check the arguments */
|
||||
err = check_kfunc_args(env, &meta, insn_idx);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
|
||||
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
|
||||
set_rbtree_add_callback_state);
|
||||
if (err) {
|
||||
verbose(env, "kfunc %s#%d failed callback verification\n",
|
||||
func_name, meta.func_id);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
|
||||
rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
|
||||
|
||||
@ -11998,10 +12112,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Check the arguments */
|
||||
err = check_kfunc_args(env, &meta, insn_idx);
|
||||
if (err < 0)
|
||||
return err;
|
||||
/* In case of release function, we get register number of refcounted
|
||||
* PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
|
||||
*/
|
||||
@ -12035,16 +12145,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
}
|
||||
}
|
||||
|
||||
if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
|
||||
err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
|
||||
set_rbtree_add_callback_state);
|
||||
if (err) {
|
||||
verbose(env, "kfunc %s#%d failed callback verification\n",
|
||||
func_name, meta.func_id);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
if (meta.func_id == special_kfunc_list[KF_bpf_throw]) {
|
||||
if (!bpf_jit_supports_exceptions()) {
|
||||
verbose(env, "JIT does not support calling kfunc %s#%d\n",
|
||||
@ -15408,6 +15508,15 @@ static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
|
||||
return env->insn_aux_data[insn_idx].force_checkpoint;
|
||||
}
|
||||
|
||||
static void mark_calls_callback(struct bpf_verifier_env *env, int idx)
|
||||
{
|
||||
env->insn_aux_data[idx].calls_callback = true;
|
||||
}
|
||||
|
||||
static bool calls_callback(struct bpf_verifier_env *env, int insn_idx)
|
||||
{
|
||||
return env->insn_aux_data[insn_idx].calls_callback;
|
||||
}
|
||||
|
||||
enum {
|
||||
DONE_EXPLORING = 0,
|
||||
@ -15521,6 +15630,21 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
|
||||
* async state will be pushed for further exploration.
|
||||
*/
|
||||
mark_prune_point(env, t);
|
||||
/* For functions that invoke callbacks it is not known how many times
|
||||
* callback would be called. Verifier models callback calling functions
|
||||
* by repeatedly visiting callback bodies and returning to origin call
|
||||
* instruction.
|
||||
* In order to stop such iteration verifier needs to identify when a
|
||||
* state identical some state from a previous iteration is reached.
|
||||
* Check below forces creation of checkpoint before callback calling
|
||||
* instruction to allow search for such identical states.
|
||||
*/
|
||||
if (is_sync_callback_calling_insn(insn)) {
|
||||
mark_calls_callback(env, t);
|
||||
mark_force_checkpoint(env, t);
|
||||
mark_prune_point(env, t);
|
||||
mark_jmp_point(env, t);
|
||||
}
|
||||
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
|
||||
struct bpf_kfunc_call_arg_meta meta;
|
||||
|
||||
@ -16990,10 +17114,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
|
||||
}
|
||||
goto skip_inf_loop_check;
|
||||
}
|
||||
if (calls_callback(env, insn_idx)) {
|
||||
if (states_equal(env, &sl->state, cur, true))
|
||||
goto hit;
|
||||
goto skip_inf_loop_check;
|
||||
}
|
||||
/* attempt to detect infinite loop to avoid unnecessary doomed work */
|
||||
if (states_maybe_looping(&sl->state, cur) &&
|
||||
states_equal(env, &sl->state, cur, false) &&
|
||||
!iter_active_depths_differ(&sl->state, cur)) {
|
||||
!iter_active_depths_differ(&sl->state, cur) &&
|
||||
sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
|
||||
verbose_linfo(env, insn_idx, "; ");
|
||||
verbose(env, "infinite loop detected at insn %d\n", insn_idx);
|
||||
verbose(env, "cur state:");
|
||||
|
@@ -10051,6 +10051,54 @@ void netif_tx_stop_all_queues(struct net_device *dev)
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);

static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
{
void __percpu *v;

/* Drivers implementing ndo_get_peer_dev must support tstat
* accounting, so that skb_do_redirect() can bump the dev's
* RX stats upon network namespace switch.
*/
if (dev->netdev_ops->ndo_get_peer_dev &&
dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
return -EOPNOTSUPP;

switch (dev->pcpu_stat_type) {
case NETDEV_PCPU_STAT_NONE:
return 0;
case NETDEV_PCPU_STAT_LSTATS:
v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
break;
case NETDEV_PCPU_STAT_TSTATS:
v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
break;
case NETDEV_PCPU_STAT_DSTATS:
v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
break;
default:
return -EINVAL;
}

return v ? 0 : -ENOMEM;
}

static void netdev_do_free_pcpu_stats(struct net_device *dev)
{
switch (dev->pcpu_stat_type) {
case NETDEV_PCPU_STAT_NONE:
return;
case NETDEV_PCPU_STAT_LSTATS:
free_percpu(dev->lstats);
break;
case NETDEV_PCPU_STAT_TSTATS:
free_percpu(dev->tstats);
break;
case NETDEV_PCPU_STAT_DSTATS:
free_percpu(dev->dstats);
break;
}
}

/**
* register_netdevice() - register a network device
* @dev: device to register

@@ -10111,9 +10159,13 @@ int register_netdevice(struct net_device *dev)
goto err_uninit;
}

ret = netdev_do_alloc_pcpu_stats(dev);
if (ret)
goto err_uninit;

ret = dev_index_reserve(net, dev->ifindex);
if (ret < 0)
goto err_uninit;
goto err_free_pcpu;
dev->ifindex = ret;

/* Transfer changeable features to wanted_features and enable

@@ -10219,6 +10271,8 @@ err_uninit_notify:
call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
err_ifindex_release:
dev_index_release(net, dev->ifindex);
err_free_pcpu:
netdev_do_free_pcpu_stats(dev);
err_uninit:
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);

@@ -10471,6 +10525,7 @@ void netdev_run_todo(void)
WARN_ON(rcu_access_pointer(dev->ip_ptr));
WARN_ON(rcu_access_pointer(dev->ip6_ptr));

netdev_do_free_pcpu_stats(dev);
if (dev->priv_destructor)
dev->priv_destructor(dev);
if (dev->needs_free_netdev)
@@ -81,6 +81,7 @@
#include <net/xdp.h>
#include <net/mptcp.h>
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netkit.h>
#include <linux/un.h>

#include "dev.h"

@@ -2468,6 +2469,16 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = {
DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);

static struct net_device *skb_get_peer_dev(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;

if (likely(ops->ndo_get_peer_dev))
return INDIRECT_CALL_1(ops->ndo_get_peer_dev,
netkit_peer_dev, dev);
return NULL;
}

int skb_do_redirect(struct sk_buff *skb)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

@@ -2481,17 +2492,15 @@ int skb_do_redirect(struct sk_buff *skb)
if (unlikely(!dev))
goto out_drop;
if (flags & BPF_F_PEER) {
const struct net_device_ops *ops = dev->netdev_ops;

if (unlikely(!ops->ndo_get_peer_dev ||
!skb_at_tc_ingress(skb)))
if (unlikely(!skb_at_tc_ingress(skb)))
goto out_drop;
dev = ops->ndo_get_peer_dev(dev);
dev = skb_get_peer_dev(dev);
if (unlikely(!dev ||
!(dev->flags & IFF_UP) ||
net_eq(net, dev_net(dev))))
goto out_drop;
skb->dev = dev;
dev_sw_netstats_rx_add(dev, skb->len);
return -EAGAIN;
}
return flags & BPF_F_NEIGH ?
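The skb_do_redirect() path above is what a tc program hits when it calls bpf_redirect_peer(); a minimal hedged example of such a program follows. The target ifindex is a placeholder filled in by the loader, not something from this commit.

/* Hypothetical tc program exercising bpf_redirect_peer(); with this
 * series the peer device's software RX counters are bumped on the
 * netns switch. The ifindex below is a placeholder.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

volatile const __u32 target_ifindex;	/* set by the userspace loader */

SEC("tc")
int redirect_to_peer(struct __sk_buff *skb)
{
	return bpf_redirect_peer(target_ifindex, 0);
}

char _license[] SEC("license") = "GPL";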
@ -24,6 +24,7 @@
|
||||
|
||||
#include "test_progs.h"
|
||||
#include "network_helpers.h"
|
||||
#include "netlink_helpers.h"
|
||||
#include "test_tc_neigh_fib.skel.h"
|
||||
#include "test_tc_neigh.skel.h"
|
||||
#include "test_tc_peer.skel.h"
|
||||
@ -110,11 +111,17 @@ static void netns_setup_namespaces_nofail(const char *verb)
|
||||
}
|
||||
}
|
||||
|
||||
enum dev_mode {
|
||||
MODE_VETH,
|
||||
MODE_NETKIT,
|
||||
};
|
||||
|
||||
struct netns_setup_result {
|
||||
int ifindex_veth_src;
|
||||
int ifindex_veth_src_fwd;
|
||||
int ifindex_veth_dst;
|
||||
int ifindex_veth_dst_fwd;
|
||||
enum dev_mode dev_mode;
|
||||
int ifindex_src;
|
||||
int ifindex_src_fwd;
|
||||
int ifindex_dst;
|
||||
int ifindex_dst_fwd;
|
||||
};
|
||||
|
||||
static int get_ifaddr(const char *name, char *ifaddr)
|
||||
@ -137,58 +144,110 @@ static int get_ifaddr(const char *name, char *ifaddr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int create_netkit(int mode, char *prim, char *peer)
|
||||
{
|
||||
struct rtattr *linkinfo, *data, *peer_info;
|
||||
struct rtnl_handle rth = { .fd = -1 };
|
||||
const char *type = "netkit";
|
||||
struct {
|
||||
struct nlmsghdr n;
|
||||
struct ifinfomsg i;
|
||||
char buf[1024];
|
||||
} req = {};
|
||||
int err;
|
||||
|
||||
err = rtnl_open(&rth, 0);
|
||||
if (!ASSERT_OK(err, "open_rtnetlink"))
|
||||
return err;
|
||||
|
||||
memset(&req, 0, sizeof(req));
|
||||
req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
|
||||
req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
|
||||
req.n.nlmsg_type = RTM_NEWLINK;
|
||||
req.i.ifi_family = AF_UNSPEC;
|
||||
|
||||
addattr_l(&req.n, sizeof(req), IFLA_IFNAME, prim, strlen(prim));
|
||||
linkinfo = addattr_nest(&req.n, sizeof(req), IFLA_LINKINFO);
|
||||
addattr_l(&req.n, sizeof(req), IFLA_INFO_KIND, type, strlen(type));
|
||||
data = addattr_nest(&req.n, sizeof(req), IFLA_INFO_DATA);
|
||||
addattr32(&req.n, sizeof(req), IFLA_NETKIT_MODE, mode);
|
||||
peer_info = addattr_nest(&req.n, sizeof(req), IFLA_NETKIT_PEER_INFO);
|
||||
req.n.nlmsg_len += sizeof(struct ifinfomsg);
|
||||
addattr_l(&req.n, sizeof(req), IFLA_IFNAME, peer, strlen(peer));
|
||||
addattr_nest_end(&req.n, peer_info);
|
||||
addattr_nest_end(&req.n, data);
|
||||
addattr_nest_end(&req.n, linkinfo);
|
||||
|
||||
err = rtnl_talk(&rth, &req.n, NULL);
|
||||
ASSERT_OK(err, "talk_rtnetlink");
|
||||
rtnl_close(&rth);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int netns_setup_links_and_routes(struct netns_setup_result *result)
|
||||
{
|
||||
struct nstoken *nstoken = NULL;
|
||||
char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {};
|
||||
char src_fwd_addr[IFADDR_STR_LEN+1] = {};
|
||||
int err;
|
||||
|
||||
SYS(fail, "ip link add veth_src type veth peer name veth_src_fwd");
|
||||
SYS(fail, "ip link add veth_dst type veth peer name veth_dst_fwd");
|
||||
if (result->dev_mode == MODE_VETH) {
|
||||
SYS(fail, "ip link add src type veth peer name src_fwd");
|
||||
SYS(fail, "ip link add dst type veth peer name dst_fwd");
|
||||
|
||||
SYS(fail, "ip link set veth_dst_fwd address " MAC_DST_FWD);
|
||||
SYS(fail, "ip link set veth_dst address " MAC_DST);
|
||||
SYS(fail, "ip link set dst_fwd address " MAC_DST_FWD);
|
||||
SYS(fail, "ip link set dst address " MAC_DST);
|
||||
} else if (result->dev_mode == MODE_NETKIT) {
|
||||
err = create_netkit(NETKIT_L3, "src", "src_fwd");
|
||||
if (!ASSERT_OK(err, "create_ifindex_src"))
|
||||
goto fail;
|
||||
err = create_netkit(NETKIT_L3, "dst", "dst_fwd");
|
||||
if (!ASSERT_OK(err, "create_ifindex_dst"))
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr))
|
||||
if (get_ifaddr("src_fwd", src_fwd_addr))
|
||||
goto fail;
|
||||
|
||||
result->ifindex_veth_src = if_nametoindex("veth_src");
|
||||
if (!ASSERT_GT(result->ifindex_veth_src, 0, "ifindex_veth_src"))
|
||||
result->ifindex_src = if_nametoindex("src");
|
||||
if (!ASSERT_GT(result->ifindex_src, 0, "ifindex_src"))
|
||||
goto fail;
|
||||
|
||||
result->ifindex_veth_src_fwd = if_nametoindex("veth_src_fwd");
|
||||
if (!ASSERT_GT(result->ifindex_veth_src_fwd, 0, "ifindex_veth_src_fwd"))
|
||||
result->ifindex_src_fwd = if_nametoindex("src_fwd");
|
||||
if (!ASSERT_GT(result->ifindex_src_fwd, 0, "ifindex_src_fwd"))
|
||||
goto fail;
|
||||
|
||||
result->ifindex_veth_dst = if_nametoindex("veth_dst");
|
||||
if (!ASSERT_GT(result->ifindex_veth_dst, 0, "ifindex_veth_dst"))
|
||||
result->ifindex_dst = if_nametoindex("dst");
|
||||
if (!ASSERT_GT(result->ifindex_dst, 0, "ifindex_dst"))
|
||||
goto fail;
|
||||
|
||||
result->ifindex_veth_dst_fwd = if_nametoindex("veth_dst_fwd");
|
||||
if (!ASSERT_GT(result->ifindex_veth_dst_fwd, 0, "ifindex_veth_dst_fwd"))
|
||||
result->ifindex_dst_fwd = if_nametoindex("dst_fwd");
|
||||
if (!ASSERT_GT(result->ifindex_dst_fwd, 0, "ifindex_dst_fwd"))
|
||||
goto fail;
|
||||
|
||||
SYS(fail, "ip link set veth_src netns " NS_SRC);
|
||||
SYS(fail, "ip link set veth_src_fwd netns " NS_FWD);
|
||||
SYS(fail, "ip link set veth_dst_fwd netns " NS_FWD);
|
||||
SYS(fail, "ip link set veth_dst netns " NS_DST);
|
||||
SYS(fail, "ip link set src netns " NS_SRC);
|
||||
SYS(fail, "ip link set src_fwd netns " NS_FWD);
|
||||
SYS(fail, "ip link set dst_fwd netns " NS_FWD);
|
||||
SYS(fail, "ip link set dst netns " NS_DST);
|
||||
|
||||
/** setup in 'src' namespace */
|
||||
nstoken = open_netns(NS_SRC);
|
||||
if (!ASSERT_OK_PTR(nstoken, "setns src"))
|
||||
goto fail;
|
||||
|
||||
SYS(fail, "ip addr add " IP4_SRC "/32 dev veth_src");
|
||||
SYS(fail, "ip addr add " IP6_SRC "/128 dev veth_src nodad");
|
||||
SYS(fail, "ip link set dev veth_src up");
|
||||
SYS(fail, "ip addr add " IP4_SRC "/32 dev src");
|
||||
SYS(fail, "ip addr add " IP6_SRC "/128 dev src nodad");
|
||||
SYS(fail, "ip link set dev src up");
|
||||
|
||||
SYS(fail, "ip route add " IP4_DST "/32 dev veth_src scope global");
|
||||
SYS(fail, "ip route add " IP4_NET "/16 dev veth_src scope global");
|
||||
SYS(fail, "ip route add " IP6_DST "/128 dev veth_src scope global");
|
||||
SYS(fail, "ip route add " IP4_DST "/32 dev src scope global");
|
||||
SYS(fail, "ip route add " IP4_NET "/16 dev src scope global");
|
||||
SYS(fail, "ip route add " IP6_DST "/128 dev src scope global");
|
||||
|
||||
SYS(fail, "ip neigh add " IP4_DST " dev veth_src lladdr %s",
|
||||
veth_src_fwd_addr);
|
||||
SYS(fail, "ip neigh add " IP6_DST " dev veth_src lladdr %s",
|
||||
veth_src_fwd_addr);
|
||||
if (result->dev_mode == MODE_VETH) {
|
||||
SYS(fail, "ip neigh add " IP4_DST " dev src lladdr %s",
|
||||
src_fwd_addr);
|
||||
SYS(fail, "ip neigh add " IP6_DST " dev src lladdr %s",
|
||||
src_fwd_addr);
|
||||
}
|
||||
|
||||
close_netns(nstoken);
|
||||
|
||||
@ -201,15 +260,15 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
|
||||
* needs v4 one in order to start ARP probing. IP4_NET route is added
|
||||
* to the endpoints so that the ARP processing will reply.
|
||||
*/
|
||||
SYS(fail, "ip addr add " IP4_SLL "/32 dev veth_src_fwd");
|
||||
SYS(fail, "ip addr add " IP4_DLL "/32 dev veth_dst_fwd");
|
||||
SYS(fail, "ip link set dev veth_src_fwd up");
|
||||
SYS(fail, "ip link set dev veth_dst_fwd up");
|
||||
SYS(fail, "ip addr add " IP4_SLL "/32 dev src_fwd");
|
||||
SYS(fail, "ip addr add " IP4_DLL "/32 dev dst_fwd");
|
||||
SYS(fail, "ip link set dev src_fwd up");
|
||||
SYS(fail, "ip link set dev dst_fwd up");
|
||||
|
||||
SYS(fail, "ip route add " IP4_SRC "/32 dev veth_src_fwd scope global");
|
||||
SYS(fail, "ip route add " IP6_SRC "/128 dev veth_src_fwd scope global");
|
||||
SYS(fail, "ip route add " IP4_DST "/32 dev veth_dst_fwd scope global");
|
||||
SYS(fail, "ip route add " IP6_DST "/128 dev veth_dst_fwd scope global");
|
||||
SYS(fail, "ip route add " IP4_SRC "/32 dev src_fwd scope global");
|
||||
SYS(fail, "ip route add " IP6_SRC "/128 dev src_fwd scope global");
|
||||
SYS(fail, "ip route add " IP4_DST "/32 dev dst_fwd scope global");
|
||||
SYS(fail, "ip route add " IP6_DST "/128 dev dst_fwd scope global");
|
||||
|
||||
close_netns(nstoken);
|
||||
|
||||
@ -218,16 +277,18 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
|
||||
if (!ASSERT_OK_PTR(nstoken, "setns dst"))
|
||||
goto fail;
|
||||
|
||||
SYS(fail, "ip addr add " IP4_DST "/32 dev veth_dst");
|
||||
SYS(fail, "ip addr add " IP6_DST "/128 dev veth_dst nodad");
|
||||
SYS(fail, "ip link set dev veth_dst up");
|
||||
SYS(fail, "ip addr add " IP4_DST "/32 dev dst");
|
||||
SYS(fail, "ip addr add " IP6_DST "/128 dev dst nodad");
|
||||
SYS(fail, "ip link set dev dst up");
|
||||
|
||||
SYS(fail, "ip route add " IP4_SRC "/32 dev veth_dst scope global");
|
||||
SYS(fail, "ip route add " IP4_NET "/16 dev veth_dst scope global");
|
||||
SYS(fail, "ip route add " IP6_SRC "/128 dev veth_dst scope global");
|
||||
SYS(fail, "ip route add " IP4_SRC "/32 dev dst scope global");
|
||||
SYS(fail, "ip route add " IP4_NET "/16 dev dst scope global");
|
||||
SYS(fail, "ip route add " IP6_SRC "/128 dev dst scope global");
|
||||
|
||||
SYS(fail, "ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD);
|
||||
SYS(fail, "ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD);
|
||||
if (result->dev_mode == MODE_VETH) {
|
||||
SYS(fail, "ip neigh add " IP4_SRC " dev dst lladdr " MAC_DST_FWD);
|
||||
SYS(fail, "ip neigh add " IP6_SRC " dev dst lladdr " MAC_DST_FWD);
|
||||
}
|
||||
|
||||
close_netns(nstoken);
|
||||
|
||||
@ -293,23 +354,23 @@ static int netns_load_bpf(const struct bpf_program *src_prog,
|
||||
const struct bpf_program *chk_prog,
|
||||
const struct netns_setup_result *setup_result)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src_fwd);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_src_fwd);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
|
||||
int err;
|
||||
|
||||
/* tc qdisc add dev veth_src_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_veth_src_fwd, setup_result->ifindex_veth_src_fwd);
|
||||
/* tc filter add dev veth_src_fwd ingress bpf da src_prog */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS, src_prog, 0);
|
||||
/* tc filter add dev veth_src_fwd egress bpf da chk_prog */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS, chk_prog, 0);
|
||||
/* tc qdisc add dev src_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_src_fwd, setup_result->ifindex_src_fwd);
|
||||
/* tc filter add dev src_fwd ingress bpf da src_prog */
|
||||
XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS, src_prog, 0);
|
||||
/* tc filter add dev src_fwd egress bpf da chk_prog */
|
||||
XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS, chk_prog, 0);
|
||||
|
||||
/* tc qdisc add dev veth_dst_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
|
||||
/* tc filter add dev veth_dst_fwd ingress bpf da dst_prog */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, dst_prog, 0);
|
||||
/* tc filter add dev veth_dst_fwd egress bpf da chk_prog */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, chk_prog, 0);
|
||||
/* tc qdisc add dev dst_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
|
||||
/* tc filter add dev dst_fwd ingress bpf da dst_prog */
|
||||
XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, dst_prog, 0);
|
||||
/* tc filter add dev dst_fwd egress bpf da chk_prog */
|
||||
XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, chk_prog, 0);
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
@ -539,10 +600,10 @@ done:
|
||||
static int netns_load_dtime_bpf(struct test_tc_dtime *skel,
|
||||
const struct netns_setup_result *setup_result)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src_fwd);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_src_fwd);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_src);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_dst);
|
||||
struct nstoken *nstoken;
|
||||
int err;
|
||||
|
||||
@ -550,58 +611,58 @@ static int netns_load_dtime_bpf(struct test_tc_dtime *skel,
|
||||
nstoken = open_netns(NS_SRC);
|
||||
if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC))
|
||||
return -1;
|
||||
/* tc qdisc add dev veth_src clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_veth_src, setup_result->ifindex_veth_src);
|
||||
/* tc filter add dev veth_src ingress bpf da ingress_host */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_src, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
|
||||
/* tc filter add dev veth_src egress bpf da egress_host */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_src, BPF_TC_EGRESS, skel->progs.egress_host, 0);
|
||||
/* tc qdisc add dev src clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_src, setup_result->ifindex_src);
|
||||
/* tc filter add dev src ingress bpf da ingress_host */
|
||||
XGRESS_FILTER_ADD(&qdisc_src, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
|
||||
/* tc filter add dev src egress bpf da egress_host */
|
||||
XGRESS_FILTER_ADD(&qdisc_src, BPF_TC_EGRESS, skel->progs.egress_host, 0);
|
||||
close_netns(nstoken);
|
||||
|
||||
/* setup ns_dst tc progs */
|
||||
nstoken = open_netns(NS_DST);
|
||||
if (!ASSERT_OK_PTR(nstoken, "setns " NS_DST))
|
||||
return -1;
|
||||
/* tc qdisc add dev veth_dst clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_veth_dst, setup_result->ifindex_veth_dst);
|
||||
/* tc filter add dev veth_dst ingress bpf da ingress_host */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_dst, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
|
||||
/* tc filter add dev veth_dst egress bpf da egress_host */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_dst, BPF_TC_EGRESS, skel->progs.egress_host, 0);
|
||||
/* tc qdisc add dev dst clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_dst, setup_result->ifindex_dst);
|
||||
/* tc filter add dev dst ingress bpf da ingress_host */
|
||||
XGRESS_FILTER_ADD(&qdisc_dst, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
|
||||
/* tc filter add dev dst egress bpf da egress_host */
|
||||
XGRESS_FILTER_ADD(&qdisc_dst, BPF_TC_EGRESS, skel->progs.egress_host, 0);
|
||||
close_netns(nstoken);
|
||||
|
||||
/* setup ns_fwd tc progs */
|
||||
nstoken = open_netns(NS_FWD);
|
||||
if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD))
|
||||
return -1;
|
||||
/* tc qdisc add dev veth_dst_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
|
||||
/* tc filter add dev veth_dst_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS,
|
||||
/* tc qdisc add dev dst_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
|
||||
/* tc filter add dev dst_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
|
||||
XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS,
|
||||
skel->progs.ingress_fwdns_prio100, 100);
|
||||
/* tc filter add dev veth_dst_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS,
|
||||
/* tc filter add dev dst_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
|
||||
XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS,
|
||||
skel->progs.ingress_fwdns_prio101, 101);
|
||||
/* tc filter add dev veth_dst_fwd egress prio 100 bpf da egress_fwdns_prio100 */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS,
|
||||
/* tc filter add dev dst_fwd egress prio 100 bpf da egress_fwdns_prio100 */
|
||||
XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS,
|
||||
skel->progs.egress_fwdns_prio100, 100);
|
||||
/* tc filter add dev veth_dst_fwd egress prio 101 bpf da egress_fwdns_prio101 */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS,
|
||||
/* tc filter add dev dst_fwd egress prio 101 bpf da egress_fwdns_prio101 */
|
||||
XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS,
|
||||
skel->progs.egress_fwdns_prio101, 101);
|
||||
|
||||
/* tc qdisc add dev veth_src_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_veth_src_fwd, setup_result->ifindex_veth_src_fwd);
|
||||
/* tc filter add dev veth_src_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS,
|
||||
/* tc qdisc add dev src_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_src_fwd, setup_result->ifindex_src_fwd);
|
||||
/* tc filter add dev src_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
|
||||
XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS,
|
||||
skel->progs.ingress_fwdns_prio100, 100);
|
||||
/* tc filter add dev veth_src_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS,
|
||||
/* tc filter add dev src_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
|
||||
XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS,
|
||||
skel->progs.ingress_fwdns_prio101, 101);
|
||||
/* tc filter add dev veth_src_fwd egress prio 100 bpf da egress_fwdns_prio100 */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS,
|
||||
/* tc filter add dev src_fwd egress prio 100 bpf da egress_fwdns_prio100 */
|
||||
XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS,
|
||||
skel->progs.egress_fwdns_prio100, 100);
|
||||
/* tc filter add dev veth_src_fwd egress prio 101 bpf da egress_fwdns_prio101 */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS,
|
||||
/* tc filter add dev src_fwd egress prio 101 bpf da egress_fwdns_prio101 */
|
||||
XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS,
|
||||
skel->progs.egress_fwdns_prio101, 101);
|
||||
close_netns(nstoken);
|
||||
return 0;
|
||||
@ -777,8 +838,8 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
|
||||
if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open"))
|
||||
return;
|
||||
|
||||
skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
|
||||
skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
|
||||
skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
|
||||
skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
|
||||
|
||||
err = test_tc_dtime__load(skel);
|
||||
if (!ASSERT_OK(err, "test_tc_dtime__load"))
|
||||
@ -868,8 +929,8 @@ static void test_tc_redirect_neigh(struct netns_setup_result *setup_result)
|
||||
if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open"))
|
||||
goto done;
|
||||
|
||||
skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
|
||||
skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
|
||||
skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
|
||||
skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
|
||||
|
||||
err = test_tc_neigh__load(skel);
|
||||
if (!ASSERT_OK(err, "test_tc_neigh__load"))
|
||||
@ -904,8 +965,8 @@ static void test_tc_redirect_peer(struct netns_setup_result *setup_result)
|
||||
if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
|
||||
goto done;
|
||||
|
||||
skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
|
||||
skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
|
||||
skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
|
||||
skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
|
||||
|
||||
err = test_tc_peer__load(skel);
|
||||
if (!ASSERT_OK(err, "test_tc_peer__load"))
|
||||
@ -996,7 +1057,7 @@ static int tun_relay_loop(int src_fd, int target_fd)
|
||||
static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_tun_fwd);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
|
||||
LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
|
||||
struct test_tc_peer *skel = NULL;
|
||||
struct nstoken *nstoken = NULL;
|
||||
int err;
|
||||
@ -1045,7 +1106,7 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
|
||||
goto fail;
|
||||
|
||||
skel->rodata->IFINDEX_SRC = ifindex;
|
||||
skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
|
||||
skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
|
||||
|
||||
err = test_tc_peer__load(skel);
|
||||
if (!ASSERT_OK(err, "test_tc_peer__load"))
|
||||
@ -1053,19 +1114,19 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
|
||||
|
||||
/* Load "tc_src_l3" to the tun_fwd interface to redirect packets
|
||||
* towards dst, and "tc_dst" to redirect packets
|
||||
* and "tc_chk" on veth_dst_fwd to drop non-redirected packets.
|
||||
* and "tc_chk" on dst_fwd to drop non-redirected packets.
|
||||
*/
|
||||
/* tc qdisc add dev tun_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_tun_fwd, ifindex);
|
||||
/* tc filter add dev tun_fwd ingress bpf da tc_src_l3 */
|
||||
XGRESS_FILTER_ADD(&qdisc_tun_fwd, BPF_TC_INGRESS, skel->progs.tc_src_l3, 0);
|
||||
|
||||
/* tc qdisc add dev veth_dst_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
|
||||
/* tc filter add dev veth_dst_fwd ingress bpf da tc_dst_l3 */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, skel->progs.tc_dst_l3, 0);
|
||||
/* tc filter add dev veth_dst_fwd egress bpf da tc_chk */
|
||||
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0);
|
||||
/* tc qdisc add dev dst_fwd clsact */
|
||||
QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
|
||||
/* tc filter add dev dst_fwd ingress bpf da tc_dst_l3 */
|
||||
XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, skel->progs.tc_dst_l3, 0);
|
||||
/* tc filter add dev dst_fwd egress bpf da tc_chk */
|
||||
XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0);
|
||||
|
||||
/* Setup route and neigh tables */
|
||||
SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24");
|
||||
@ -1074,17 +1135,17 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
|
||||
SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad");
|
||||
SYS(fail, "ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad");
|
||||
|
||||
SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global");
|
||||
SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev src scope global");
|
||||
SYS(fail, "ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD
|
||||
" dev tun_src scope global");
|
||||
SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global");
|
||||
SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global");
|
||||
SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev dst scope global");
|
||||
SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev src scope global");
|
||||
SYS(fail, "ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD
|
||||
" dev tun_src scope global");
|
||||
SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global");
|
||||
SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev dst scope global");
|
||||
|
||||
SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
|
||||
SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
|
||||
SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev dst lladdr " MAC_DST_FWD);
|
||||
SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev dst lladdr " MAC_DST_FWD);
|
||||
|
||||
if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
|
||||
goto fail;
|
||||
@ -1106,9 +1167,9 @@ fail:
|
||||
close_netns(nstoken);
|
||||
}
|
||||
|
||||
#define RUN_TEST(name) \
#define RUN_TEST(name, mode) \
({ \
struct netns_setup_result setup_result; \
struct netns_setup_result setup_result = { .dev_mode = mode, }; \
if (test__start_subtest(#name)) \
if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \
if (ASSERT_OK(netns_setup_links_and_routes(&setup_result), \
@@ -1122,11 +1183,13 @@ static void *test_tc_redirect_run_tests(void *arg)
{
netns_setup_namespaces_nofail("delete");

RUN_TEST(tc_redirect_peer);
RUN_TEST(tc_redirect_peer_l3);
RUN_TEST(tc_redirect_neigh);
RUN_TEST(tc_redirect_neigh_fib);
RUN_TEST(tc_redirect_dtime);
RUN_TEST(tc_redirect_peer, MODE_VETH);
RUN_TEST(tc_redirect_peer, MODE_NETKIT);
RUN_TEST(tc_redirect_peer_l3, MODE_VETH);
RUN_TEST(tc_redirect_peer_l3, MODE_NETKIT);
RUN_TEST(tc_redirect_neigh, MODE_VETH);
RUN_TEST(tc_redirect_neigh_fib, MODE_VETH);
RUN_TEST(tc_redirect_dtime, MODE_VETH);
return NULL;
}
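The run list above is the user-visible effect of the de-veth-ization: each redirect scenario is now driven twice through a dev_mode field carried in netns_setup_result. As a hedged sketch (not the selftest's own code), a setup helper branching on that field could look like the following; MODE_VETH/MODE_NETKIT, struct netns_setup_result and SYS() come from the selftest, while create_link_pair() and the netkit command line are illustrative only (the selftest sets the netkit pair up programmatically, and "type netkit" assumes an iproute2 new enough to know that link type).

/* Hedged sketch, not part of the patch. */
static int create_link_pair(const struct netns_setup_result *result)
{
	if (result->dev_mode == MODE_VETH) {
		SYS(fail, "ip link add src type veth peer name src_fwd");
		SYS(fail, "ip link add dst type veth peer name dst_fwd");
	} else {
		/* Illustrative equivalent for the netkit case. */
		SYS(fail, "ip link add src type netkit peer name src_fwd");
		SYS(fail, "ip link add dst type netkit peer name dst_fwd");
	}
	return 0;
fail:
	return -1;
}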
@ -31,6 +31,7 @@
|
||||
#include "verifier_helper_restricted.skel.h"
|
||||
#include "verifier_helper_value_access.skel.h"
|
||||
#include "verifier_int_ptr.skel.h"
|
||||
#include "verifier_iterating_callbacks.skel.h"
|
||||
#include "verifier_jeq_infer_not_null.skel.h"
|
||||
#include "verifier_ld_ind.skel.h"
|
||||
#include "verifier_ldsx.skel.h"
|
||||
@ -139,6 +140,7 @@ void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_acces
|
||||
void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); }
|
||||
void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); }
|
||||
void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); }
|
||||
void test_verifier_iterating_callbacks(void) { RUN(verifier_iterating_callbacks); }
|
||||
void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); }
|
||||
void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
|
||||
void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
|
||||
|
@@ -15,13 +15,16 @@ static int empty_callback(__u32 index, void *data)
return 0;
}

static int outer_loop(__u32 index, void *data)
{
bpf_loop(nr_loops, empty_callback, NULL, 0);
__sync_add_and_fetch(&hits, nr_loops);
return 0;
}

SEC("fentry/" SYS_PREFIX "sys_getpgid")
int benchmark(void *ctx)
{
for (int i = 0; i < 1000; i++) {
bpf_loop(nr_loops, empty_callback, NULL, 0);

__sync_add_and_fetch(&hits, nr_loops);
}
bpf_loop(1000, outer_loop, NULL, 0);
return 0;
}
|
@ -33,6 +33,7 @@ int underflow_prog(void *ctx)
|
||||
if (!p)
|
||||
return 0;
|
||||
bpf_for_each_map_elem(&array_map, cb1, &p, 0);
|
||||
bpf_kfunc_call_test_release(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -171,6 +171,7 @@ int reject_with_rbtree_add_throw(void *ctx)
|
||||
return 0;
|
||||
bpf_spin_lock(&lock);
|
||||
bpf_rbtree_add(&rbtree, &f->node, rbless);
|
||||
bpf_spin_unlock(&lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -214,6 +215,7 @@ int reject_with_cb_reference(void *ctx)
|
||||
if (!f)
|
||||
return 0;
|
||||
bpf_loop(5, subprog_cb_ref, NULL, 0);
|
||||
bpf_obj_drop(f);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -24,9 +24,11 @@ struct task_struct {};
|
||||
#define STACK_TABLE_EPOCH_SHIFT 20
|
||||
#define STROBE_MAX_STR_LEN 1
|
||||
#define STROBE_MAX_CFGS 32
|
||||
#define READ_MAP_VAR_PAYLOAD_CAP \
|
||||
((1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
|
||||
#define STROBE_MAX_PAYLOAD \
|
||||
(STROBE_MAX_STRS * STROBE_MAX_STR_LEN + \
|
||||
STROBE_MAX_MAPS * (1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
|
||||
STROBE_MAX_MAPS * READ_MAP_VAR_PAYLOAD_CAP)
|
||||
|
||||
struct strobe_value_header {
|
||||
/*
|
||||
@ -355,7 +357,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
|
||||
size_t idx, void *tls_base,
|
||||
struct strobe_value_generic *value,
|
||||
struct strobemeta_payload *data,
|
||||
void *payload)
|
||||
size_t off)
|
||||
{
|
||||
void *location;
|
||||
uint64_t len;
|
||||
@ -366,7 +368,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
|
||||
return 0;
|
||||
|
||||
bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
|
||||
len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, value->ptr);
|
||||
len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, value->ptr);
|
||||
/*
|
||||
* if bpf_probe_read_user_str returns error (<0), due to casting to
|
||||
* unsinged int, it will become big number, so next check is
|
||||
@ -378,14 +380,14 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
|
||||
return 0;
|
||||
|
||||
data->str_lens[idx] = len;
|
||||
return len;
|
||||
return off + len;
|
||||
}
|
||||
|
||||
static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
|
||||
size_t idx, void *tls_base,
|
||||
struct strobe_value_generic *value,
|
||||
struct strobemeta_payload *data,
|
||||
void *payload)
|
||||
static __always_inline uint64_t read_map_var(struct strobemeta_cfg *cfg,
|
||||
size_t idx, void *tls_base,
|
||||
struct strobe_value_generic *value,
|
||||
struct strobemeta_payload *data,
|
||||
size_t off)
|
||||
{
|
||||
struct strobe_map_descr* descr = &data->map_descrs[idx];
|
||||
struct strobe_map_raw map;
|
||||
@ -397,11 +399,11 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
|
||||
|
||||
location = calc_location(&cfg->map_locs[idx], tls_base);
|
||||
if (!location)
|
||||
return payload;
|
||||
return off;
|
||||
|
||||
bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
|
||||
if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr))
|
||||
return payload;
|
||||
return off;
|
||||
|
||||
descr->id = map.id;
|
||||
descr->cnt = map.cnt;
|
||||
@ -410,10 +412,10 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
|
||||
data->req_meta_valid = 1;
|
||||
}
|
||||
|
||||
len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag);
|
||||
len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, map.tag);
|
||||
if (len <= STROBE_MAX_STR_LEN) {
|
||||
descr->tag_len = len;
|
||||
payload += len;
|
||||
off += len;
|
||||
}
|
||||
|
||||
#ifdef NO_UNROLL
|
||||
@ -426,22 +428,22 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
|
||||
break;
|
||||
|
||||
descr->key_lens[i] = 0;
|
||||
len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
|
||||
len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN,
|
||||
map.entries[i].key);
|
||||
if (len <= STROBE_MAX_STR_LEN) {
|
||||
descr->key_lens[i] = len;
|
||||
payload += len;
|
||||
off += len;
|
||||
}
|
||||
descr->val_lens[i] = 0;
|
||||
len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
|
||||
len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN,
|
||||
map.entries[i].val);
|
||||
if (len <= STROBE_MAX_STR_LEN) {
|
||||
descr->val_lens[i] = len;
|
||||
payload += len;
|
||||
off += len;
|
||||
}
|
||||
}
|
||||
|
||||
return payload;
|
||||
return off;
|
||||
}
|
||||
|
||||
#ifdef USE_BPF_LOOP
|
||||
@@ -455,14 +457,20 @@ struct read_var_ctx {
struct strobemeta_payload *data;
void *tls_base;
struct strobemeta_cfg *cfg;
void *payload;
size_t payload_off;
/* value gets mutated */
struct strobe_value_generic *value;
enum read_type type;
};
|
||||
static int read_var_callback(__u32 index, struct read_var_ctx *ctx)
|
||||
static int read_var_callback(__u64 index, struct read_var_ctx *ctx)
|
||||
{
|
||||
/* lose precision info for ctx->payload_off, verifier won't track
|
||||
* double xor, barrier_var() is needed to force clang keep both xors.
|
||||
*/
|
||||
ctx->payload_off ^= index;
|
||||
barrier_var(ctx->payload_off);
|
||||
ctx->payload_off ^= index;
|
||||
switch (ctx->type) {
|
||||
case READ_INT_VAR:
|
||||
if (index >= STROBE_MAX_INTS)
|
||||
@ -472,14 +480,18 @@ static int read_var_callback(__u32 index, struct read_var_ctx *ctx)
|
||||
case READ_MAP_VAR:
|
||||
if (index >= STROBE_MAX_MAPS)
|
||||
return 1;
|
||||
ctx->payload = read_map_var(ctx->cfg, index, ctx->tls_base,
|
||||
ctx->value, ctx->data, ctx->payload);
|
||||
if (ctx->payload_off > sizeof(ctx->data->payload) - READ_MAP_VAR_PAYLOAD_CAP)
|
||||
return 1;
|
||||
ctx->payload_off = read_map_var(ctx->cfg, index, ctx->tls_base,
|
||||
ctx->value, ctx->data, ctx->payload_off);
|
||||
break;
|
||||
case READ_STR_VAR:
|
||||
if (index >= STROBE_MAX_STRS)
|
||||
return 1;
|
||||
ctx->payload += read_str_var(ctx->cfg, index, ctx->tls_base,
|
||||
ctx->value, ctx->data, ctx->payload);
|
||||
if (ctx->payload_off > sizeof(ctx->data->payload) - STROBE_MAX_STR_LEN)
|
||||
return 1;
|
||||
ctx->payload_off = read_str_var(ctx->cfg, index, ctx->tls_base,
|
||||
ctx->value, ctx->data, ctx->payload_off);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
@ -501,7 +513,8 @@ static void *read_strobe_meta(struct task_struct *task,
|
||||
pid_t pid = bpf_get_current_pid_tgid() >> 32;
|
||||
struct strobe_value_generic value = {0};
|
||||
struct strobemeta_cfg *cfg;
|
||||
void *tls_base, *payload;
|
||||
size_t payload_off;
|
||||
void *tls_base;
|
||||
|
||||
cfg = bpf_map_lookup_elem(&strobemeta_cfgs, &pid);
|
||||
if (!cfg)
|
||||
@ -509,7 +522,7 @@ static void *read_strobe_meta(struct task_struct *task,
|
||||
|
||||
data->int_vals_set_mask = 0;
|
||||
data->req_meta_valid = 0;
|
||||
payload = data->payload;
|
||||
payload_off = 0;
|
||||
/*
|
||||
* we don't have struct task_struct definition, it should be:
|
||||
* tls_base = (void *)task->thread.fsbase;
|
||||
@ -522,7 +535,7 @@ static void *read_strobe_meta(struct task_struct *task,
|
||||
.tls_base = tls_base,
|
||||
.value = &value,
|
||||
.data = data,
|
||||
.payload = payload,
|
||||
.payload_off = 0,
|
||||
};
|
||||
int err;
|
||||
|
||||
@ -540,6 +553,11 @@ static void *read_strobe_meta(struct task_struct *task,
|
||||
err = bpf_loop(STROBE_MAX_MAPS, read_var_callback, &ctx, 0);
|
||||
if (err != STROBE_MAX_MAPS)
|
||||
return NULL;
|
||||
|
||||
payload_off = ctx.payload_off;
|
||||
/* this should not really happen, here only to satisfy verifer */
|
||||
if (payload_off > sizeof(data->payload))
|
||||
payload_off = sizeof(data->payload);
|
||||
#else
|
||||
#ifdef NO_UNROLL
|
||||
#pragma clang loop unroll(disable)
|
||||
@ -555,7 +573,7 @@ static void *read_strobe_meta(struct task_struct *task,
|
||||
#pragma unroll
|
||||
#endif /* NO_UNROLL */
|
||||
for (int i = 0; i < STROBE_MAX_STRS; ++i) {
|
||||
payload += read_str_var(cfg, i, tls_base, &value, data, payload);
|
||||
payload_off = read_str_var(cfg, i, tls_base, &value, data, payload_off);
|
||||
}
|
||||
#ifdef NO_UNROLL
|
||||
#pragma clang loop unroll(disable)
|
||||
@ -563,7 +581,7 @@ static void *read_strobe_meta(struct task_struct *task,
|
||||
#pragma unroll
|
||||
#endif /* NO_UNROLL */
|
||||
for (int i = 0; i < STROBE_MAX_MAPS; ++i) {
|
||||
payload = read_map_var(cfg, i, tls_base, &value, data, payload);
|
||||
payload_off = read_map_var(cfg, i, tls_base, &value, data, payload_off);
|
||||
}
|
||||
#endif /* USE_BPF_LOOP */
|
||||
|
||||
@ -571,7 +589,7 @@ static void *read_strobe_meta(struct task_struct *task,
|
||||
* return pointer right after end of payload, so it's possible to
|
||||
* calculate exact amount of useful data that needs to be sent
|
||||
*/
|
||||
return payload;
|
||||
return &data->payload[payload_off];
|
||||
}
|
||||
|
||||
SEC("raw_tracepoint/kfree_skb")
|
||||
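The strobemeta rework above replaces the moving payload pointer with a payload_off scalar that is re-checked against the payload size before every read, which is what lets the verifier accept the bpf_loop() path now that callbacks are assumed to run an arbitrary number of times. A stripped-down sketch of that pattern follows; it is not part of the patch, it reuses struct strobemeta_payload and STROBE_MAX_STR_LEN from the file above, and the remaining names are invented.

/* Illustrative only: bound a scalar offset inside a bpf_loop() callback so
 * every payload access stays provably in range on any iteration.
 */
struct sketch_ctx {
	struct strobemeta_payload *data;
	size_t off;
};

static int sketch_cb(__u64 index, struct sketch_ctx *ctx)
{
	size_t off = ctx->off;

	if (off > sizeof(ctx->data->payload) - STROBE_MAX_STR_LEN)
		return 1;			/* out of room: stop the loop */
	ctx->data->payload[off] = 0;		/* in bounds thanks to the check above */
	ctx->off = off + STROBE_MAX_STR_LEN;	/* offset remains a tracked scalar */
	return 0;
}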
|
tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c (new file, 242 lines)
@@ -0,0 +1,242 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <linux/bpf.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include "bpf_misc.h"
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 8);
|
||||
__type(key, __u32);
|
||||
__type(value, __u64);
|
||||
} map SEC(".maps");
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
|
||||
__uint(max_entries, 8);
|
||||
} ringbuf SEC(".maps");
|
||||
|
||||
struct vm_area_struct;
|
||||
struct bpf_map;
|
||||
|
||||
struct buf_context {
|
||||
char *buf;
|
||||
};
|
||||
|
||||
struct num_context {
|
||||
__u64 i;
|
||||
__u64 j;
|
||||
};
|
||||
|
||||
__u8 choice_arr[2] = { 0, 1 };
|
||||
|
||||
static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
|
||||
{
|
||||
if (idx == 0) {
|
||||
ctx->buf = (char *)(0xDEAD);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("R1 type=scalar expected=fp")
|
||||
int unsafe_on_2nd_iter(void *unused)
|
||||
{
|
||||
char buf[4];
|
||||
struct buf_context loop_ctx = { .buf = buf };
|
||||
|
||||
bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
|
||||
{
|
||||
ctx->i = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
|
||||
int unsafe_on_zero_iter(void *unused)
|
||||
{
|
||||
struct num_context loop_ctx = { .i = 32 };
|
||||
|
||||
bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
|
||||
return choice_arr[loop_ctx.i];
|
||||
}
|
||||
|
||||
static int widening_cb(__u32 idx, struct num_context *ctx)
|
||||
{
|
||||
++ctx->i;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success
|
||||
int widening(void *unused)
|
||||
{
|
||||
struct num_context loop_ctx = { .i = 0, .j = 1 };
|
||||
|
||||
bpf_loop(100, widening_cb, &loop_ctx, 0);
|
||||
/* loop_ctx.j is not changed during callback iteration,
|
||||
* verifier should not apply widening to it.
|
||||
*/
|
||||
return choice_arr[loop_ctx.j];
|
||||
}
|
||||
|
||||
static int loop_detection_cb(__u32 idx, struct num_context *ctx)
|
||||
{
|
||||
for (;;) {}
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("infinite loop detected")
|
||||
int loop_detection(void *unused)
|
||||
{
|
||||
struct num_context loop_ctx = { .i = 0 };
|
||||
|
||||
bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __always_inline __u64 oob_state_machine(struct num_context *ctx)
|
||||
{
|
||||
switch (ctx->i) {
|
||||
case 0:
|
||||
ctx->i = 1;
|
||||
break;
|
||||
case 1:
|
||||
ctx->i = 32;
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
|
||||
{
|
||||
return oob_state_machine(data);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
|
||||
int unsafe_for_each_map_elem(void *unused)
|
||||
{
|
||||
struct num_context loop_ctx = { .i = 0 };
|
||||
|
||||
bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
|
||||
return choice_arr[loop_ctx.i];
|
||||
}
|
||||
|
||||
static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
|
||||
{
|
||||
return oob_state_machine(data);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
|
||||
int unsafe_ringbuf_drain(void *unused)
|
||||
{
|
||||
struct num_context loop_ctx = { .i = 0 };
|
||||
|
||||
bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
|
||||
return choice_arr[loop_ctx.i];
|
||||
}
|
||||
|
||||
static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
|
||||
{
|
||||
return oob_state_machine(data);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
|
||||
int unsafe_find_vma(void *unused)
|
||||
{
|
||||
struct task_struct *task = bpf_get_current_task_btf();
|
||||
struct num_context loop_ctx = { .i = 0 };
|
||||
|
||||
bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
|
||||
return choice_arr[loop_ctx.i];
|
||||
}
|
||||
|
||||
static int iter_limit_cb(__u32 idx, struct num_context *ctx)
|
||||
{
|
||||
ctx->i++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success
|
||||
int bpf_loop_iter_limit_ok(void *unused)
|
||||
{
|
||||
struct num_context ctx = { .i = 0 };
|
||||
|
||||
bpf_loop(1, iter_limit_cb, &ctx, 0);
|
||||
return choice_arr[ctx.i];
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__failure __msg("invalid access to map value, value_size=2 off=2 size=1")
|
||||
int bpf_loop_iter_limit_overflow(void *unused)
|
||||
{
|
||||
struct num_context ctx = { .i = 0 };
|
||||
|
||||
bpf_loop(2, iter_limit_cb, &ctx, 0);
|
||||
return choice_arr[ctx.i];
|
||||
}
|
||||
|
||||
static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
|
||||
{
|
||||
ctx->i += 100;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
|
||||
{
|
||||
ctx->i += 10;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
|
||||
{
|
||||
ctx->i += 1;
|
||||
bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
|
||||
bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Check that path visiting every callback function once had been
|
||||
* reached by verifier. Variables 'ctx{1,2}i' below serve as flags,
|
||||
* with each decimal digit corresponding to a callback visit marker.
|
||||
*/
|
||||
SEC("socket")
|
||||
__success __retval(111111)
|
||||
int bpf_loop_iter_limit_nested(void *unused)
|
||||
{
|
||||
struct num_context ctx1 = { .i = 0 };
|
||||
struct num_context ctx2 = { .i = 0 };
|
||||
__u64 a, b, c;
|
||||
|
||||
bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
|
||||
bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
|
||||
a = ctx1.i;
|
||||
b = ctx2.i;
|
||||
/* Force 'ctx1.i' and 'ctx2.i' precise. */
|
||||
c = choice_arr[(a + b) % 2];
|
||||
/* This makes 'c' zero, but neither clang nor verifier know it. */
|
||||
c /= 10;
|
||||
/* Make sure that verifier does not visit 'impossible' states:
|
||||
* enumerate all possible callback visit masks.
|
||||
*/
|
||||
if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
|
||||
b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
|
||||
asm volatile ("r0 /= 0;" ::: "r0");
|
||||
return 1000 * a + b + c;
|
||||
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
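For contrast with the failing cases in the new selftest above, a callback-mutated field can still be used as an array index if the program re-establishes a bound after the loop. The sketch below is not part of the patch; it reuses the file's own num_context, choice_arr and bpf_loop(), and under the new verification scheme it should be accepted because the widened value is explicitly bounded before the access.

static int clamped_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i += 1;
	return 0;
}

SEC("?raw_tp")
int clamped_use(void *unused)
{
	struct num_context loop_ctx = { .i = 0 };

	bpf_loop(100, clamped_cb, &loop_ctx, 0);
	if (loop_ctx.i > 1)	/* explicit bound makes the widened scalar safe */
		return 0;
	return choice_arr[loop_ctx.i];
}

Assuming the standard selftests runner, the new programs are exercised via ./test_progs -t verifier_iterating_callbacks.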
@ -119,15 +119,41 @@ __naked int global_subprog_result_precise(void)
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
/* First simulated path does not include callback body,
|
||||
* r1 and r4 are always precise for bpf_loop() calls.
|
||||
*/
|
||||
__msg("9: (85) call bpf_loop#181")
|
||||
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
|
||||
__msg("mark_precise: frame0: parent state regs=r4 stack=:")
|
||||
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
|
||||
__msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0")
|
||||
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
|
||||
__msg("mark_precise: frame0: parent state regs=r1 stack=:")
|
||||
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
|
||||
__msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0")
|
||||
__msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0")
|
||||
__msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8")
|
||||
__msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
|
||||
/* r6 precision propagation */
|
||||
__msg("14: (0f) r1 += r6")
|
||||
__msg("mark_precise: frame0: last_idx 14 first_idx 10")
|
||||
__msg("mark_precise: frame0: last_idx 14 first_idx 9")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
|
||||
__msg("mark_precise: frame0: parent state regs=r0 stack=:")
|
||||
__msg("mark_precise: frame0: last_idx 18 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit")
|
||||
__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
|
||||
/* State entering callback body popped from states stack */
|
||||
__msg("from 9 to 17: frame1:")
|
||||
__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
|
||||
__msg("17: (b7) r0 = 0")
|
||||
__msg("18: (95) exit")
|
||||
__msg("returning from callee:")
|
||||
__msg("to caller at 9:")
|
||||
__msg("frame 0: propagating r1,r4")
|
||||
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
|
||||
__msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit")
|
||||
__msg("from 18 to 9: safe")
|
||||
__naked int callback_result_precise(void)
|
||||
{
|
||||
asm volatile (
|
||||
@ -233,20 +259,36 @@ __naked int parent_callee_saved_reg_precise_global(void)
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
/* First simulated path does not include callback body */
|
||||
__msg("12: (0f) r1 += r6")
|
||||
__msg("mark_precise: frame0: last_idx 12 first_idx 10")
|
||||
__msg("mark_precise: frame0: last_idx 12 first_idx 9")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
|
||||
__msg("mark_precise: frame0: parent state regs=r6 stack=:")
|
||||
__msg("mark_precise: frame0: last_idx 16 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) exit")
|
||||
__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
|
||||
__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181")
|
||||
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
|
||||
/* State entering callback body popped from states stack */
|
||||
__msg("from 9 to 15: frame1:")
|
||||
__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
|
||||
__msg("15: (b7) r0 = 0")
|
||||
__msg("16: (95) exit")
|
||||
__msg("returning from callee:")
|
||||
__msg("to caller at 9:")
|
||||
/* r1, r4 are always precise for bpf_loop(),
|
||||
* r6 was marked before backtracking to callback body.
|
||||
*/
|
||||
__msg("frame 0: propagating r1,r4,r6")
|
||||
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
|
||||
__msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit")
|
||||
__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
|
||||
__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
|
||||
__msg("mark_precise: frame0: parent state regs= stack=:")
|
||||
__msg("from 16 to 9: safe")
|
||||
__naked int parent_callee_saved_reg_precise_with_callback(void)
|
||||
{
|
||||
asm volatile (
|
||||
@ -373,22 +415,38 @@ __naked int parent_stack_slot_precise_global(void)
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
/* First simulated path does not include callback body */
|
||||
__msg("14: (0f) r1 += r6")
|
||||
__msg("mark_precise: frame0: last_idx 14 first_idx 11")
|
||||
__msg("mark_precise: frame0: last_idx 14 first_idx 10")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
|
||||
__msg("mark_precise: frame0: parent state regs= stack=-8:")
|
||||
__msg("mark_precise: frame0: last_idx 18 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
|
||||
__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
|
||||
__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
|
||||
__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
|
||||
/* State entering callback body popped from states stack */
|
||||
__msg("from 10 to 17: frame1:")
|
||||
__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
|
||||
__msg("17: (b7) r0 = 0")
|
||||
__msg("18: (95) exit")
|
||||
__msg("returning from callee:")
|
||||
__msg("to caller at 10:")
|
||||
/* r1, r4 are always precise for bpf_loop(),
|
||||
* fp-8 was marked before backtracking to callback body.
|
||||
*/
|
||||
__msg("frame 0: propagating r1,r4,fp-8")
|
||||
__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
|
||||
__msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit")
|
||||
__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
|
||||
__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
|
||||
__msg("mark_precise: frame0: parent state regs= stack=:")
|
||||
__msg("from 18 to 10: safe")
|
||||
__naked int parent_stack_slot_precise_with_callback(void)
|
||||
{
|
||||
asm volatile (
|
||||
|
@ -53,6 +53,8 @@
|
||||
#define DEFAULT_TTL 64
|
||||
#define MAX_ALLOWED_PORTS 8
|
||||
|
||||
#define MAX_PACKET_OFF 0xffff
|
||||
|
||||
#define swap(a, b) \
|
||||
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
|
||||
|
||||
@ -183,63 +185,76 @@ static __always_inline __u32 tcp_clock_ms(void)
|
||||
}
|
||||
|
||||
struct tcpopt_context {
__u8 *ptr;
__u8 *end;
void *data;
void *data_end;
__be32 *tsecr;
__u8 wscale;
bool option_timestamp;
bool option_sack;
__u32 off;
};

static __always_inline u8 *next(struct tcpopt_context *ctx, __u32 sz)
{
__u64 off = ctx->off;
__u8 *data;

/* Verifier forbids access to packet when offset exceeds MAX_PACKET_OFF */
if (off > MAX_PACKET_OFF - sz)
return NULL;

data = ctx->data + off;
barrier_var(data);
if (data + sz >= ctx->data_end)
return NULL;

ctx->off += sz;
return data;
}
|
||||
static int tscookie_tcpopt_parse(struct tcpopt_context *ctx)
|
||||
{
|
||||
__u8 opcode, opsize;
|
||||
__u8 *opcode, *opsize, *wscale, *tsecr;
|
||||
__u32 off = ctx->off;
|
||||
|
||||
if (ctx->ptr >= ctx->end)
|
||||
return 1;
|
||||
if (ctx->ptr >= ctx->data_end)
|
||||
opcode = next(ctx, 1);
|
||||
if (!opcode)
|
||||
return 1;
|
||||
|
||||
opcode = ctx->ptr[0];
|
||||
|
||||
if (opcode == TCPOPT_EOL)
|
||||
if (*opcode == TCPOPT_EOL)
|
||||
return 1;
|
||||
if (opcode == TCPOPT_NOP) {
|
||||
++ctx->ptr;
|
||||
if (*opcode == TCPOPT_NOP)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ctx->ptr + 1 >= ctx->end)
|
||||
return 1;
|
||||
if (ctx->ptr + 1 >= ctx->data_end)
|
||||
return 1;
|
||||
opsize = ctx->ptr[1];
|
||||
if (opsize < 2)
|
||||
opsize = next(ctx, 1);
|
||||
if (!opsize || *opsize < 2)
|
||||
return 1;
|
||||
|
||||
if (ctx->ptr + opsize > ctx->end)
|
||||
return 1;
|
||||
|
||||
switch (opcode) {
|
||||
switch (*opcode) {
|
||||
case TCPOPT_WINDOW:
|
||||
if (opsize == TCPOLEN_WINDOW && ctx->ptr + TCPOLEN_WINDOW <= ctx->data_end)
|
||||
ctx->wscale = ctx->ptr[2] < TCP_MAX_WSCALE ? ctx->ptr[2] : TCP_MAX_WSCALE;
|
||||
wscale = next(ctx, 1);
|
||||
if (!wscale)
|
||||
return 1;
|
||||
if (*opsize == TCPOLEN_WINDOW)
|
||||
ctx->wscale = *wscale < TCP_MAX_WSCALE ? *wscale : TCP_MAX_WSCALE;
|
||||
break;
|
||||
case TCPOPT_TIMESTAMP:
|
||||
if (opsize == TCPOLEN_TIMESTAMP && ctx->ptr + TCPOLEN_TIMESTAMP <= ctx->data_end) {
|
||||
tsecr = next(ctx, 4);
|
||||
if (!tsecr)
|
||||
return 1;
|
||||
if (*opsize == TCPOLEN_TIMESTAMP) {
|
||||
ctx->option_timestamp = true;
|
||||
/* Client's tsval becomes our tsecr. */
|
||||
*ctx->tsecr = get_unaligned((__be32 *)(ctx->ptr + 2));
|
||||
*ctx->tsecr = get_unaligned((__be32 *)tsecr);
|
||||
}
|
||||
break;
|
||||
case TCPOPT_SACK_PERM:
|
||||
if (opsize == TCPOLEN_SACK_PERM)
|
||||
if (*opsize == TCPOLEN_SACK_PERM)
|
||||
ctx->option_sack = true;
|
||||
break;
|
||||
}
|
||||
|
||||
ctx->ptr += opsize;
|
||||
ctx->off = off + *opsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
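The parser above now funnels every option access through next(), so the verifier only has to keep the scalar off precise across bpf_loop() iterations instead of a derived packet pointer. As a hedged illustration of how further option handling could reuse the same accessor (MSS parsing is not part of this patch and parse_mss() is an invented name), a caller might look like:

static __always_inline int parse_mss(struct tcpopt_context *ctx, __u16 *mss)
{
	__u8 *p = next(ctx, 2);		/* 2-byte option payload after kind/len */

	if (!p)
		return -1;		/* past data_end or MAX_PACKET_OFF */
	*mss = (p[0] << 8) | p[1];
	return 0;
}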
@ -256,16 +271,21 @@ static int tscookie_tcpopt_parse_batch(__u32 index, void *context)
|
||||
|
||||
static __always_inline bool tscookie_init(struct tcphdr *tcp_header,
|
||||
__u16 tcp_len, __be32 *tsval,
|
||||
__be32 *tsecr, void *data_end)
|
||||
__be32 *tsecr, void *data, void *data_end)
|
||||
{
|
||||
struct tcpopt_context loop_ctx = {
|
||||
.ptr = (__u8 *)(tcp_header + 1),
|
||||
.end = (__u8 *)tcp_header + tcp_len,
|
||||
.data = data,
|
||||
.data_end = data_end,
|
||||
.tsecr = tsecr,
|
||||
.wscale = TS_OPT_WSCALE_MASK,
|
||||
.option_timestamp = false,
|
||||
.option_sack = false,
|
||||
/* Note: currently verifier would track .off as unbound scalar.
|
||||
* In case if verifier would at some point get smarter and
|
||||
* compute bounded value for this var, beware that it might
|
||||
* hinder bpf_loop() convergence validation.
|
||||
*/
|
||||
.off = (__u8 *)(tcp_header + 1) - (__u8 *)data,
|
||||
};
|
||||
u32 cookie;
|
||||
|
||||
@ -635,7 +655,7 @@ static __always_inline int syncookie_handle_syn(struct header_pointers *hdr,
|
||||
cookie = (__u32)value;
|
||||
|
||||
if (tscookie_init((void *)hdr->tcp, hdr->tcp_len,
|
||||
&tsopt_buf[0], &tsopt_buf[1], data_end))
|
||||
&tsopt_buf[0], &tsopt_buf[1], data, data_end))
|
||||
tsopt = tsopt_buf;
|
||||
|
||||
/* Check that there is enough space for a SYNACK. It also covers