Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-03-09

The following pull-request contains BPF updates for your *net-next* tree.

We've added 90 non-merge commits during the last 17 day(s) which contain
a total of 114 files changed, 5158 insertions(+), 1288 deletions(-).

The main changes are:

1) Faster bpf_redirect_map(), from Björn.

2) skmsg cleanup, from Cong.

3) Support for floating point types in BTF, from Ilya.

4) Documentation for sys_bpf commands, from Joe.

5) Support for sk_lookup in bpf_prog_test_run, from Lorenz.

6) Enable task local storage for tracing programs, from Song.

7) bpf_for_each_map_elem() helper, from Yonghong.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
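Of the items above, bpf_for_each_map_elem() is the one that adds a new program-facing helper. As a quick illustration only (not part of this merge; the map, callback and program names are made up and a libbpf-style build is assumed), a program can walk an array map and sum its values:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} pkt_counters SEC(".maps");

/* Called once per element; returning 1 would stop the walk early. */
static __u64 sum_one(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	*(__u64 *)ctx += *val;
	return 0;
}

SEC("classifier")
int sum_counters(struct __sk_buff *skb)
{
	__u64 total = 0;

	bpf_for_each_map_elem(&pkt_counters, sum_one, &total, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";

Per the helper's documentation, it returns the number of elements traversed on success or a negative error for invalid flags.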
@@ -16,7 +16,6 @@ obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
obj-y += net-sysfs.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
obj-$(CONFIG_NETPOLL) += netpoll.o
obj-$(CONFIG_FIB_RULES) += fib_rules.o
@@ -28,10 +27,13 @@ obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o
obj-$(CONFIG_BPF_STREAM_PARSER) += sock_map.o
obj-$(CONFIG_DST_CACHE) += dst_cache.o
obj-$(CONFIG_HWBM) += hwbm.o
obj-$(CONFIG_NET_DEVLINK) += devlink.o
obj-$(CONFIG_GRO_CELLS) += gro_cells.o
obj-$(CONFIG_FAILOVER) += failover.o
ifeq ($(CONFIG_INET),y)
obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
endif
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
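The Makefile hunks above (apparently net/core/Makefile) drop the CONFIG_BPF_STREAM_PARSER dependency for sock_map.o: it is now built whenever CONFIG_BPF_SYSCALL is set and CONFIG_INET is enabled, and skmsg.o moves under the same CONFIG_INET guard while remaining gated by CONFIG_NET_SOCK_MSG. As a rough illustration, a hypothetical .config fragment like the following still gets sockmap support after this change even with the stream parser disabled:

CONFIG_INET=y
CONFIG_NET_SOCK_MSG=y
CONFIG_BPF_SYSCALL=y
# CONFIG_BPF_STREAM_PARSER is not set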
@@ -89,7 +89,7 @@ static void bpf_sk_storage_map_free(struct bpf_map *map)

smap = (struct bpf_local_storage_map *)map;
bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
bpf_local_storage_map_free(smap);
bpf_local_storage_map_free(smap, NULL);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
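The extra NULL argument to bpf_local_storage_map_free() comes from generalizing the local-storage infrastructure; the same pull request enables task local storage for tracing programs. A minimal sketch of that feature, assuming the helper names as merged upstream (bpf_task_storage_get(), bpf_get_current_task_btf()) and a made-up map and program; none of this appears in the diff itself:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} syscall_cnt SEC(".maps");

SEC("tp_btf/sys_enter")
int BPF_PROG(count_syscalls, struct pt_regs *regs, long id)
{
	struct task_struct *task = bpf_get_current_task_btf();
	__u64 *cnt;

	/* Create the per-task slot on first use, then bump it. */
	cnt = bpf_task_storage_get(&syscall_cnt, task, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (cnt)
		__sync_fetch_and_add(cnt, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";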
@@ -1863,10 +1863,7 @@ static const struct bpf_func_proto bpf_sk_fullsock_proto = {
static inline int sk_skb_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
int err = __bpf_try_make_writable(skb, write_len);

bpf_compute_data_end_sk_skb(skb);
return err;
return __bpf_try_make_writable(skb, write_len);
}

BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
@@ -3412,6 +3409,7 @@ static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
BPF_F_ADJ_ROOM_ENCAP_L2_ETH | \
BPF_F_ADJ_ROOM_ENCAP_L2( \
BPF_ADJ_ROOM_ENCAP_L2_MASK))

@@ -3448,6 +3446,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
return -EINVAL;

if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH &&
inner_mac_len < ETH_HLEN)
return -EINVAL;

if (skb->encapsulation)
return -EALREADY;

@@ -3466,7 +3468,11 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
skb->inner_mac_header = inner_net - inner_mac_len;
skb->inner_network_header = inner_net;
skb->inner_transport_header = inner_trans;
skb_set_inner_protocol(skb, skb->protocol);

if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH)
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
else
skb_set_inner_protocol(skb, skb->protocol);

skb->encapsulation = 1;
skb_set_network_header(skb, mac_len);
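These hunks add the BPF_F_ADJ_ROOM_ENCAP_L2_ETH flag: when a program reserves room for an inner Ethernet header, the inner protocol is set to ETH_P_TEB so the stack treats the payload as an Ethernet frame. A rough sketch of how a tc program might request such headroom before writing an outer IPv4/UDP tunnel header plus inner Ethernet header (the header-writing code is omitted and the 8-byte tunnel header size is only illustrative):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int encap_inner_eth(struct __sk_buff *skb)
{
	/* Outer IPv4 + UDP + an 8-byte tunnel header + inner Ethernet. */
	__u32 grow = sizeof(struct iphdr) + sizeof(struct udphdr) + 8 +
		     sizeof(struct ethhdr);
	__u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
		      BPF_F_ADJ_ROOM_ENCAP_L4_UDP |
		      BPF_F_ADJ_ROOM_ENCAP_L2_ETH |
		      BPF_F_ADJ_ROOM_ENCAP_L2(sizeof(struct ethhdr));

	if (bpf_skb_adjust_room(skb, grow, BPF_ADJ_ROOM_MAC, flags))
		return TC_ACT_SHOT;

	/* ... the program would now write the outer and inner headers ... */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

Note the new check above: the L2 length passed via BPF_F_ADJ_ROOM_ENCAP_L2() must be at least ETH_HLEN when BPF_F_ADJ_ROOM_ENCAP_L2_ETH is set.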
@@ -3577,7 +3583,6 @@ BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
return -ENOMEM;
__skb_pull(skb, len_diff_abs);
}
bpf_compute_data_end_sk_skb(skb);
if (tls_sw_has_ctx_rx(skb->sk)) {
struct strp_msg *rxm = strp_msg(skb);

@@ -3742,10 +3747,7 @@ static const struct bpf_func_proto bpf_skb_change_tail_proto = {
BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
u64, flags)
{
int ret = __bpf_skb_change_tail(skb, new_len, flags);

bpf_compute_data_end_sk_skb(skb);
return ret;
return __bpf_skb_change_tail(skb, new_len, flags);
}

static const struct bpf_func_proto sk_skb_change_tail_proto = {
@@ -3808,10 +3810,7 @@ static const struct bpf_func_proto bpf_skb_change_head_proto = {
BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
u64, flags)
{
int ret = __bpf_skb_change_head(skb, head_room, flags);

bpf_compute_data_end_sk_skb(skb);
return ret;
return __bpf_skb_change_head(skb, head_room, flags);
}

static const struct bpf_func_proto sk_skb_change_head_proto = {
@@ -3919,23 +3918,6 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
.arg2_type = ARG_ANYTHING,
};

static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
struct bpf_map *map, struct xdp_buff *xdp)
{
switch (map->map_type) {
case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH:
return dev_map_enqueue(fwd, xdp, dev_rx);
case BPF_MAP_TYPE_CPUMAP:
return cpu_map_enqueue(fwd, xdp, dev_rx);
case BPF_MAP_TYPE_XSKMAP:
return __xsk_map_redirect(fwd, xdp);
default:
return -EBADRQC;
}
return 0;
}

void xdp_do_flush(void)
{
__dev_flush();
@@ -3944,71 +3926,52 @@ void xdp_do_flush(void)
}
EXPORT_SYMBOL_GPL(xdp_do_flush);

static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
{
switch (map->map_type) {
case BPF_MAP_TYPE_DEVMAP:
return __dev_map_lookup_elem(map, index);
case BPF_MAP_TYPE_DEVMAP_HASH:
return __dev_map_hash_lookup_elem(map, index);
case BPF_MAP_TYPE_CPUMAP:
return __cpu_map_lookup_elem(map, index);
case BPF_MAP_TYPE_XSKMAP:
return __xsk_map_lookup_elem(map, index);
default:
return NULL;
}
}

void bpf_clear_redirect_map(struct bpf_map *map)
{
struct bpf_redirect_info *ri;
int cpu;

for_each_possible_cpu(cpu) {
ri = per_cpu_ptr(&bpf_redirect_info, cpu);
/* Avoid polluting remote cacheline due to writes if
* not needed. Once we pass this test, we need the
* cmpxchg() to make sure it hasn't been changed in
* the meantime by remote CPU.
*/
if (unlikely(READ_ONCE(ri->map) == map))
cmpxchg(&ri->map, map, NULL);
}
}

int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct bpf_map *map = READ_ONCE(ri->map);
u32 index = ri->tgt_index;
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
int err;

ri->tgt_index = 0;
ri->tgt_value = NULL;
WRITE_ONCE(ri->map, NULL);
ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
ri->map_type = BPF_MAP_TYPE_UNSPEC;

if (unlikely(!map)) {
fwd = dev_get_by_index_rcu(dev_net(dev), index);
if (unlikely(!fwd)) {
err = -EINVAL;
goto err;
switch (map_type) {
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
err = dev_map_enqueue(fwd, xdp, dev);
break;
case BPF_MAP_TYPE_CPUMAP:
err = cpu_map_enqueue(fwd, xdp, dev);
break;
case BPF_MAP_TYPE_XSKMAP:
err = __xsk_map_redirect(fwd, xdp);
break;
case BPF_MAP_TYPE_UNSPEC:
if (map_id == INT_MAX) {
fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
if (unlikely(!fwd)) {
err = -EINVAL;
break;
}
err = dev_xdp_enqueue(fwd, xdp, dev);
break;
}

err = dev_xdp_enqueue(fwd, xdp, dev);
} else {
err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
fallthrough;
default:
err = -EBADRQC;
}

if (unlikely(err))
goto err;

_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
return 0;
err:
_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
return err;
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
@@ -4017,41 +3980,36 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog,
struct bpf_map *map)
void *fwd,
enum bpf_map_type map_type, u32 map_id)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
u32 index = ri->tgt_index;
void *fwd = ri->tgt_value;
int err = 0;
int err;

ri->tgt_index = 0;
ri->tgt_value = NULL;
WRITE_ONCE(ri->map, NULL);

if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
struct bpf_dtab_netdev *dst = fwd;

err = dev_map_generic_redirect(dst, skb, xdp_prog);
switch (map_type) {
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
err = dev_map_generic_redirect(fwd, skb, xdp_prog);
if (unlikely(err))
goto err;
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
struct xdp_sock *xs = fwd;

err = xsk_generic_rcv(xs, xdp);
break;
case BPF_MAP_TYPE_XSKMAP:
err = xsk_generic_rcv(fwd, xdp);
if (err)
goto err;
consume_skb(skb);
} else {
break;
default:
/* TODO: Handle BPF_MAP_TYPE_CPUMAP */
err = -EBADRQC;
goto err;
}

_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
return 0;
err:
_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
return err;
}

@@ -4059,31 +4017,34 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct bpf_map *map = READ_ONCE(ri->map);
u32 index = ri->tgt_index;
struct net_device *fwd;
int err = 0;
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
int err;

if (map)
return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
map);
ri->tgt_index = 0;
fwd = dev_get_by_index_rcu(dev_net(dev), index);
if (unlikely(!fwd)) {
err = -EINVAL;
goto err;
ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
ri->map_type = BPF_MAP_TYPE_UNSPEC;

if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
if (unlikely(!fwd)) {
err = -EINVAL;
goto err;
}

err = xdp_ok_fwd_dev(fwd, skb->len);
if (unlikely(err))
goto err;

skb->dev = fwd;
_trace_xdp_redirect(dev, xdp_prog, ri->tgt_index);
generic_xdp_tx(skb, xdp_prog);
return 0;
}

err = xdp_ok_fwd_dev(fwd, skb->len);
if (unlikely(err))
goto err;

skb->dev = fwd;
_trace_xdp_redirect(dev, xdp_prog, index);
generic_xdp_tx(skb, xdp_prog);
return 0;
return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
err:
_trace_xdp_redirect_err(dev, xdp_prog, index, err);
_trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
return err;
}
@@ -4094,10 +4055,12 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
if (unlikely(flags))
return XDP_ABORTED;

ri->flags = flags;
/* NB! Map type UNSPEC and map_id == INT_MAX (never generated
* by map_idr) is used for ifindex based XDP redirect.
*/
ri->tgt_index = ifindex;
ri->tgt_value = NULL;
WRITE_ONCE(ri->map, NULL);
ri->map_id = INT_MAX;
ri->map_type = BPF_MAP_TYPE_UNSPEC;

return XDP_REDIRECT;
}
@@ -4113,28 +4076,7 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
u64, flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

/* Lower bits of the flags are used as return code on lookup failure */
if (unlikely(flags > XDP_TX))
return XDP_ABORTED;

ri->tgt_value = __xdp_map_lookup_elem(map, ifindex);
if (unlikely(!ri->tgt_value)) {
/* If the lookup fails we want to clear out the state in the
* redirect_info struct completely, so that if an eBPF program
* performs multiple lookups, the last one always takes
* precedence.
*/
WRITE_ONCE(ri->map, NULL);
return flags;
}

ri->flags = flags;
ri->tgt_index = ifindex;
WRITE_ONCE(ri->map, map);

return XDP_REDIRECT;
return map->ops->map_redirect(map, ifindex, flags);
}

static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
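With this rework, bpf_xdp_redirect_map() no longer records ri->map and does the target lookup itself; it delegates to the map's new map_redirect operation, and xdp_do_redirect() later dispatches on the recorded map type. From the program's point of view the helper is used exactly as before. A minimal sketch, assuming a libbpf-style program and a made-up devmap named tx_ports:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 64);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_slot0(struct xdp_md *ctx)
{
	/* The low bits of the last argument are the action returned when
	 * the lookup fails, here XDP_PASS if slot 0 holds no device. */
	return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
}

char _license[] SEC("license") = "GPL";

Userspace would populate tx_ports with target ifindex values (for example via bpf_map_update_elem()) and attach the program to the receiving device.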
@@ -9655,22 +9597,40 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
return insn - insn_buf;
}

/* data_end = skb->data + skb_headlen() */
static struct bpf_insn *bpf_convert_data_end_access(const struct bpf_insn *si,
struct bpf_insn *insn)
{
/* si->dst_reg = skb->data */
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
si->dst_reg, si->src_reg,
offsetof(struct sk_buff, data));
/* AX = skb->len */
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
BPF_REG_AX, si->src_reg,
offsetof(struct sk_buff, len));
/* si->dst_reg = skb->data + skb->len */
*insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
/* AX = skb->data_len */
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data_len),
BPF_REG_AX, si->src_reg,
offsetof(struct sk_buff, data_len));
/* si->dst_reg = skb->data + skb->len - skb->data_len */
*insn++ = BPF_ALU64_REG(BPF_SUB, si->dst_reg, BPF_REG_AX);

return insn;
}

static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{
struct bpf_insn *insn = insn_buf;
int off;

switch (si->off) {
case offsetof(struct __sk_buff, data_end):
off = si->off;
off -= offsetof(struct __sk_buff, data_end);
off += offsetof(struct sk_buff, cb);
off += offsetof(struct tcp_skb_cb, bpf.data_end);
*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
si->src_reg, off);
insn = bpf_convert_data_end_access(si, insn);
break;
default:
return bpf_convert_ctx_access(type, si, insn_buf, prog,
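The new bpf_convert_data_end_access() computes data_end for sk_skb programs directly from the skb at context-access time, instead of loading a value that bpf_compute_data_end_sk_skb() had to keep current in the cb[] area; that is why the sk_skb helpers earlier in this diff simply drop their bpf_compute_data_end_sk_skb() calls. In plain C, the emitted instructions amount to roughly the following (a paraphrase, not code from the patch):

/* data_end for an sk_skb program: the end of the skb's linear area */
void *data_end = (void *)(skb->data + skb->len - skb->data_len);
/* equivalently: skb->data + skb_headlen(skb) */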
@@ -10449,6 +10409,7 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
}

const struct bpf_prog_ops sk_lookup_prog_ops = {
.test_run = bpf_prog_test_run_sk_lookup,
};

const struct bpf_verifier_ops sk_lookup_verifier_ops = {

net/core/skmsg.c
@@ -525,7 +525,8 @@ static void sk_psock_backlog(struct work_struct *work)
len = skb->len;
off = 0;
start:
ingress = tcp_skb_bpf_ingress(skb);
ingress = skb_bpf_ingress(skb);
skb_bpf_redirect_clear(skb);
do {
ret = -EIO;
if (likely(psock->sk->sk_socket))
@@ -618,7 +619,7 @@ struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
return link;
}

void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
struct sk_msg *msg, *tmp;

@@ -631,7 +632,12 @@ void __sk_psock_purge_ingress_msg(struct sk_psock *psock)

static void sk_psock_zap_ingress(struct sk_psock *psock)
{
__skb_queue_purge(&psock->ingress_skb);
struct sk_buff *skb;

while ((skb = __skb_dequeue(&psock->ingress_skb)) != NULL) {
skb_bpf_redirect_clear(skb);
kfree_skb(skb);
}
__sk_psock_purge_ingress_msg(psock);
}

@@ -645,15 +651,15 @@ static void sk_psock_link_destroy(struct sk_psock *psock)
}
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy_deferred(struct work_struct *gc)
{
struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

/* No sk_callback_lock since already detached. */

/* Parser has been stopped */
if (psock->progs.skb_parser)
strp_done(&psock->parser.strp);
sk_psock_done_strp(psock);

cancel_work_sync(&psock->work);

@@ -685,9 +691,9 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
write_lock_bh(&sk->sk_callback_lock);
sk_psock_restore_proto(sk, psock);
rcu_assign_sk_user_data(sk, NULL);
if (psock->progs.skb_parser)
if (psock->progs.stream_parser)
sk_psock_stop_strp(sk, psock);
else if (psock->progs.skb_verdict)
else if (psock->progs.stream_verdict)
sk_psock_stop_verdict(sk, psock);
write_unlock_bh(&sk->sk_callback_lock);
sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
@@ -743,27 +749,12 @@ out:
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
struct sk_buff *skb)
{
bpf_compute_data_end_sk_skb(skb);
return bpf_prog_run_pin_on_cpu(prog, skb);
}

static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
{
struct sk_psock_parser *parser;

parser = container_of(strp, struct sk_psock_parser, strp);
return container_of(parser, struct sk_psock, parser);
}

static void sk_psock_skb_redirect(struct sk_buff *skb)
{
struct sk_psock *psock_other;
struct sock *sk_other;

sk_other = tcp_skb_bpf_redirect_fetch(skb);
sk_other = skb_bpf_redirect_fetch(skb);
/* This error is a buggy BPF program, it returned a redirect
* return code, but then didn't set a redirect interface.
*/
@@ -806,16 +797,17 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
int ret = __SK_PASS;

rcu_read_lock();
prog = READ_ONCE(psock->progs.skb_verdict);
prog = READ_ONCE(psock->progs.stream_verdict);
if (likely(prog)) {
/* We skip full set_owner_r here because if we do a SK_PASS
* or SK_DROP we can skip skb memory accounting and use the
* TLS context.
*/
skb->sk = psock->sk;
tcp_skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = bpf_prog_run_pin_on_cpu(prog, skb);
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
skb->sk = NULL;
}
sk_psock_tls_verdict_apply(skb, psock->sk, ret);
@@ -827,7 +819,6 @@ EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
static void sk_psock_verdict_apply(struct sk_psock *psock,
struct sk_buff *skb, int verdict)
{
struct tcp_skb_cb *tcp;
struct sock *sk_other;
int err = -EIO;

@@ -839,8 +830,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
goto out_free;
}

tcp = TCP_SKB_CB(skb);
tcp->bpf.flags |= BPF_F_INGRESS;
skb_bpf_set_ingress(skb);

/* If the queue is empty then we can submit directly
* into the msg queue. If its not empty we have to
@@ -866,6 +856,24 @@ out_free:
}
}

static void sk_psock_write_space(struct sock *sk)
{
struct sk_psock *psock;
void (*write_space)(struct sock *sk) = NULL;

rcu_read_lock();
psock = sk_psock(sk);
if (likely(psock)) {
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
schedule_work(&psock->work);
write_space = psock->saved_write_space;
}
rcu_read_unlock();
if (write_space)
write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
struct sk_psock *psock;
@@ -881,11 +889,12 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
goto out;
}
skb_set_owner_r(skb, sk);
prog = READ_ONCE(psock->progs.skb_verdict);
prog = READ_ONCE(psock->progs.stream_verdict);
if (likely(prog)) {
tcp_skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = bpf_prog_run_pin_on_cpu(prog, skb);
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
}
sk_psock_verdict_apply(psock, skb, ret);
out:
@@ -899,15 +908,15 @@ static int sk_psock_strp_read_done(struct strparser *strp, int err)

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
struct sk_psock *psock = sk_psock_from_strp(strp);
struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
struct bpf_prog *prog;
int ret = skb->len;

rcu_read_lock();
prog = READ_ONCE(psock->progs.skb_parser);
prog = READ_ONCE(psock->progs.stream_parser);
if (likely(prog)) {
skb->sk = psock->sk;
ret = sk_psock_bpf_run(psock, prog, skb);
ret = bpf_prog_run_pin_on_cpu(prog, skb);
skb->sk = NULL;
}
rcu_read_unlock();
@@ -923,16 +932,59 @@ static void sk_psock_strp_data_ready(struct sock *sk)
psock = sk_psock(sk);
if (likely(psock)) {
if (tls_sw_has_ctx_rx(sk)) {
psock->parser.saved_data_ready(sk);
psock->saved_data_ready(sk);
} else {
write_lock_bh(&sk->sk_callback_lock);
strp_data_ready(&psock->parser.strp);
strp_data_ready(&psock->strp);
write_unlock_bh(&sk->sk_callback_lock);
}
}
rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
static const struct strp_callbacks cb = {
.rcv_msg = sk_psock_strp_read,
.read_sock_done = sk_psock_strp_read_done,
.parse_msg = sk_psock_strp_parse,
};

return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
if (psock->saved_data_ready)
return;

psock->saved_data_ready = sk->sk_data_ready;
sk->sk_data_ready = sk_psock_strp_data_ready;
sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
if (!psock->saved_data_ready)
return;

sk->sk_data_ready = psock->saved_data_ready;
psock->saved_data_ready = NULL;
strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
/* Parser has been stopped */
if (psock->progs.stream_parser)
strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
unsigned int offset, size_t orig_len)
{
@@ -957,11 +1009,12 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
goto out;
}
skb_set_owner_r(skb, sk);
prog = READ_ONCE(psock->progs.skb_verdict);
prog = READ_ONCE(psock->progs.stream_verdict);
if (likely(prog)) {
tcp_skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = bpf_prog_run_pin_on_cpu(prog, skb);
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
}
sk_psock_verdict_apply(psock, skb, ret);
out:
@@ -984,82 +1037,21 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

static void sk_psock_write_space(struct sock *sk)
{
struct sk_psock *psock;
void (*write_space)(struct sock *sk) = NULL;

rcu_read_lock();
psock = sk_psock(sk);
if (likely(psock)) {
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
schedule_work(&psock->work);
write_space = psock->saved_write_space;
}
rcu_read_unlock();
if (write_space)
write_space(sk);
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
static const struct strp_callbacks cb = {
.rcv_msg = sk_psock_strp_read,
.read_sock_done = sk_psock_strp_read_done,
.parse_msg = sk_psock_strp_parse,
};

psock->parser.enabled = false;
return strp_init(&psock->parser.strp, sk, &cb);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
struct sk_psock_parser *parser = &psock->parser;

if (parser->enabled)
if (psock->saved_data_ready)
return;

parser->saved_data_ready = sk->sk_data_ready;
psock->saved_data_ready = sk->sk_data_ready;
sk->sk_data_ready = sk_psock_verdict_data_ready;
sk->sk_write_space = sk_psock_write_space;
parser->enabled = true;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
struct sk_psock_parser *parser = &psock->parser;

if (parser->enabled)
return;

parser->saved_data_ready = sk->sk_data_ready;
sk->sk_data_ready = sk_psock_strp_data_ready;
sk->sk_write_space = sk_psock_write_space;
parser->enabled = true;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
struct sk_psock_parser *parser = &psock->parser;

if (!parser->enabled)
return;

sk->sk_data_ready = parser->saved_data_ready;
parser->saved_data_ready = NULL;
strp_stop(&parser->strp);
parser->enabled = false;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
struct sk_psock_parser *parser = &psock->parser;

if (!parser->enabled)
if (!psock->saved_data_ready)
return;

sk->sk_data_ready = parser->saved_data_ready;
parser->saved_data_ready = NULL;
parser->enabled = false;
sk->sk_data_ready = psock->saved_data_ready;
psock->saved_data_ready = NULL;
}
@@ -24,6 +24,9 @@ struct bpf_stab {
#define SOCK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
struct bpf_prog *old, u32 which);

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
struct bpf_stab *stab;
@@ -148,9 +151,9 @@ static void sock_map_del_link(struct sock *sk,
struct bpf_map *map = link->map;
struct bpf_stab *stab = container_of(map, struct bpf_stab,
map);
if (psock->parser.enabled && stab->progs.skb_parser)
if (psock->saved_data_ready && stab->progs.stream_parser)
strp_stop = true;
if (psock->parser.enabled && stab->progs.skb_verdict)
if (psock->saved_data_ready && stab->progs.stream_verdict)
verdict_stop = true;
list_del(&link->list);
sk_psock_free_link(link);
@@ -224,23 +227,23 @@ out:
static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
struct sock *sk)
{
struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
struct bpf_prog *msg_parser, *stream_parser, *stream_verdict;
struct sk_psock *psock;
int ret;

skb_verdict = READ_ONCE(progs->skb_verdict);
if (skb_verdict) {
skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
if (IS_ERR(skb_verdict))
return PTR_ERR(skb_verdict);
stream_verdict = READ_ONCE(progs->stream_verdict);
if (stream_verdict) {
stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
if (IS_ERR(stream_verdict))
return PTR_ERR(stream_verdict);
}

skb_parser = READ_ONCE(progs->skb_parser);
if (skb_parser) {
skb_parser = bpf_prog_inc_not_zero(skb_parser);
if (IS_ERR(skb_parser)) {
ret = PTR_ERR(skb_parser);
goto out_put_skb_verdict;
stream_parser = READ_ONCE(progs->stream_parser);
if (stream_parser) {
stream_parser = bpf_prog_inc_not_zero(stream_parser);
if (IS_ERR(stream_parser)) {
ret = PTR_ERR(stream_parser);
goto out_put_stream_verdict;
}
}

@@ -249,7 +252,7 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
msg_parser = bpf_prog_inc_not_zero(msg_parser);
if (IS_ERR(msg_parser)) {
ret = PTR_ERR(msg_parser);
goto out_put_skb_parser;
goto out_put_stream_parser;
}
}

@@ -261,8 +264,8 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,

if (psock) {
if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
(skb_parser && READ_ONCE(psock->progs.skb_parser)) ||
(skb_verdict && READ_ONCE(psock->progs.skb_verdict))) {
(stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
(stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
sk_psock_put(sk, psock);
ret = -EBUSY;
goto out_progs;
@@ -283,15 +286,15 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
goto out_drop;

write_lock_bh(&sk->sk_callback_lock);
if (skb_parser && skb_verdict && !psock->parser.enabled) {
if (stream_parser && stream_verdict && !psock->saved_data_ready) {
ret = sk_psock_init_strp(sk, psock);
if (ret)
goto out_unlock_drop;
psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
psock_set_prog(&psock->progs.skb_parser, skb_parser);
psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
psock_set_prog(&psock->progs.stream_parser, stream_parser);
sk_psock_start_strp(sk, psock);
} else if (!skb_parser && skb_verdict && !psock->parser.enabled) {
psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
sk_psock_start_verdict(sk,psock);
}
write_unlock_bh(&sk->sk_callback_lock);
@@ -303,12 +306,12 @@ out_drop:
out_progs:
if (msg_parser)
bpf_prog_put(msg_parser);
out_put_skb_parser:
if (skb_parser)
bpf_prog_put(skb_parser);
out_put_skb_verdict:
if (skb_verdict)
bpf_prog_put(skb_verdict);
out_put_stream_parser:
if (stream_parser)
bpf_prog_put(stream_parser);
out_put_stream_verdict:
if (stream_verdict)
bpf_prog_put(stream_verdict);
return ret;
}

@@ -657,7 +660,6 @@ const struct bpf_func_proto bpf_sock_map_update_proto = {
BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
struct bpf_map *, map, u32, key, u64, flags)
{
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
struct sock *sk;

if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -667,8 +669,7 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;

tcb->bpf.flags = flags;
tcb->bpf.sk_redir = sk;
skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
return SK_PASS;
}
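The skmsg.c and sock_map.c hunks rename the skb_parser/skb_verdict program slots to stream_parser/stream_verdict and move the redirect bookkeeping to the skb_bpf_*() helpers, but the user-visible attach types (BPF_SK_SKB_STREAM_PARSER, BPF_SK_SKB_STREAM_VERDICT) are unchanged. A minimal verdict-only sockmap program, as a sketch under the usual libbpf conventions (the map and function names are made up, not taken from this merge):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
} sock_map SEC(".maps");

SEC("sk_skb/stream_verdict")
int prog_stream_verdict(struct __sk_buff *skb)
{
	__u32 idx = 0;

	/* Redirect every skb to the ingress path of the socket stored in
	 * slot 0; the helper itself returns SK_DROP when the slot is empty
	 * or the socket type does not allow redirects. */
	return bpf_sk_redirect_map(skb, &sock_map, idx, BPF_F_INGRESS);
}

char _license[] SEC("license") = "GPL";

Userspace would attach such a program to the map with bpf_prog_attach(prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0) and add connected sockets to sock_map via bpf_map_update_elem().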
@@ -1250,7 +1251,6 @@ const struct bpf_func_proto bpf_sock_hash_update_proto = {
BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
struct bpf_map *, map, void *, key, u64, flags)
{
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
struct sock *sk;

if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -1260,8 +1260,7 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;

tcb->bpf.flags = flags;
tcb->bpf.sk_redir = sk;
skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
return SK_PASS;
}

@@ -1448,8 +1447,8 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
return NULL;
}

int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
struct bpf_prog *old, u32 which)
static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
struct bpf_prog *old, u32 which)
{
struct sk_psock_progs *progs = sock_map_progs(map);
struct bpf_prog **pprog;
@@ -1461,11 +1460,13 @@ int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
case BPF_SK_MSG_VERDICT:
pprog = &progs->msg_parser;
break;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
case BPF_SK_SKB_STREAM_PARSER:
pprog = &progs->skb_parser;
pprog = &progs->stream_parser;
break;
#endif
case BPF_SK_SKB_STREAM_VERDICT:
pprog = &progs->skb_verdict;
pprog = &progs->stream_verdict;
break;
default:
return -EOPNOTSUPP;