Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-11-25

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix an off-by-one bug when adjusting subprog start offsets after
   patching, from Edward.

2) Fix several bugs in queue / stack map creation, such as an overflow
   in the size allocation, from Alexei.

3) Fix wrong IPv6 destination port byte order in bpf_sk_lookup_udp
   helper, from Andrey.

4) Fix several bugs in bpftool such as preventing an infinite loop
   in get_fdinfo, error handling and man page references, from Quentin.

5) Fix the format string check in bpf_trace_printk() which wasn't
   catching an invalid format string, from Martynas.

6) Fix a bug in BPF cgroup local storage where non-atomic allocation
   was used in atomic context, from Roman.

7) Fix a NULL pointer dereference bug in bpftool from reallocarray()
   error handling, from Jakub and Wen.

8) Add a copy of pkt_cls.h and tc_bpf.h uapi headers to the tools
   include infrastructure so that bpftool compiles on older RHEL7-like
   user space which does not ship these headers, from Yonghong.

9) Fix BPF kselftests so that the user space ping test works with
   either ping6 or ping -6, from Li.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller, 2018-11-25 20:04:58 -08:00
commit 6950012742
18 changed files with 752 additions and 39 deletions

View File

@@ -139,7 +139,8 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
 		return -ENOENT;
 
 	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
-			   map->value_size, __GFP_ZERO | GFP_USER,
+			   map->value_size,
+			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
 			   map->numa_node);
 	if (!new)
 		return -ENOMEM;

View File

@@ -7,6 +7,7 @@
 #include <linux/bpf.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/capability.h>
 #include "percpu_freelist.h"
 
 #define QUEUE_STACK_CREATE_FLAG_MASK \
@@ -45,8 +46,12 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
 /* Called from syscall */
 static int queue_stack_map_alloc_check(union bpf_attr *attr)
 {
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 0 ||
+	    attr->value_size == 0 ||
 	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
 		return -EINVAL;
@@ -63,15 +68,10 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 {
 	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_queue_stack *qs;
-	u32 size, value_size;
-	u64 queue_size, cost;
+	u64 size, queue_size, cost;
 
-	size = attr->max_entries + 1;
-	value_size = attr->value_size;
-
-	queue_size = sizeof(*qs) + (u64) value_size * size;
-
-	cost = queue_size;
+	size = (u64) attr->max_entries + 1;
+	cost = queue_size = sizeof(*qs) + size * attr->value_size;
 	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-E2BIG);
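
For context on item 2 above, a minimal stand-alone user-space sketch (not kernel code; the attribute values are hypothetical) of the wrap-around that a 32-bit size permits, and how widening to 64 bits before the arithmetic, as the patch does, keeps the cost check meaningful:

/* sketch-overflow.c: illustrates the u32 -> u64 widening in the hunk above */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_entries = UINT32_MAX;	/* hypothetical map attribute */
	uint32_t value_size = 8;		/* hypothetical map attribute */

	/* Buggy pattern: "max_entries + 1" wraps to 0 in 32 bits, so the
	 * later cost check passes even though the real size is enormous. */
	uint32_t size32 = max_entries + 1;
	uint64_t cost_buggy = (uint64_t)value_size * size32;

	/* Fixed pattern: widen before the arithmetic, as the patch does. */
	uint64_t size64 = (uint64_t)max_entries + 1;
	uint64_t cost_fixed = size64 * value_size;

	printf("buggy cost: %llu, fixed cost: %llu\n",
	       (unsigned long long)cost_buggy,
	       (unsigned long long)cost_fixed);
	return 0;
}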

View File

@@ -5650,7 +5650,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
 		return;
 	/* NOTE: fake 'exit' subprog should be updated as well. */
 	for (i = 0; i <= env->subprog_cnt; i++) {
-		if (env->subprog_info[i].start < off)
+		if (env->subprog_info[i].start <= off)
 			continue;
 		env->subprog_info[i].start += len - 1;
 	}
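
For context on item 1 above, a small user-space simulation (hypothetical offsets, not the verifier itself) of why a subprogram starting exactly at the patched instruction must keep its offset, which is what changing < to <= guarantees:

/* sketch-subprog-adjust.c: models start-offset adjustment after patching */
#include <stdio.h>

/* One instruction at 'off' is replaced by 'len' instructions, so every
 * start strictly after 'off' moves forward by len - 1.  A subprog that
 * begins exactly at 'off' keeps its start (the patched sequence still
 * begins there). */
static unsigned int adjust_start(unsigned int start, unsigned int off,
				 unsigned int len, int buggy)
{
	int skip = buggy ? (start < off) : (start <= off);

	return skip ? start : start + len - 1;
}

int main(void)
{
	unsigned int off = 4, len = 3;		/* hypothetical patch site */
	unsigned int starts[] = { 0, 4, 7 };	/* hypothetical subprog starts */

	for (int i = 0; i < 3; i++)
		printf("start %u: buggy -> %u, fixed -> %u\n", starts[i],
		       adjust_start(starts[i], off, len, 1),
		       adjust_start(starts[i], off, len, 0));
	return 0;
}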

View File

@@ -196,11 +196,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 			i++;
 		} else if (fmt[i] == 'p' || fmt[i] == 's') {
 			mod[fmt_cnt]++;
-			i++;
-			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
+			/* disallow any further format extensions */
+			if (fmt[i + 1] != 0 &&
+			    !isspace(fmt[i + 1]) &&
+			    !ispunct(fmt[i + 1]))
 				return -EINVAL;
 			fmt_cnt++;
-			if (fmt[i - 1] == 's') {
+			if (fmt[i] == 's') {
 				if (str_seen)
 					/* allow only one '%s' per fmt string */
 					return -EINVAL;
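
For context on item 5 above, a user-space sketch of the tightened check: it inspects the byte following a %s/%p specifier without first advancing the index; the format strings below are made-up examples:

/* sketch-fmt-check.c: models the specifier check in the hunk above */
#include <ctype.h>
#include <stdio.h>

/* fmt[i] is the 's' or 'p' of a "%s"/"%p" specifier.  Accept only end of
 * string, whitespace or punctuation right after it, so things like "%su"
 * cannot slip through. */
static int check_after_specifier(const char *fmt, int i)
{
	if (fmt[i + 1] != 0 &&
	    !isspace((unsigned char)fmt[i + 1]) &&
	    !ispunct((unsigned char)fmt[i + 1]))
		return -1;
	return 0;
}

int main(void)
{
	printf("\"%%s\\n\" -> %d, \"%%su\" -> %d\n",
	       check_after_specifier("%s\n", 1),
	       check_after_specifier("%su", 1));
	return 0;
}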

View File

@@ -4852,18 +4852,17 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
 	} else {
 		struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
 		struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
-		u16 hnum = ntohs(tuple->ipv6.dport);
 		int sdif = inet6_sdif(skb);
 
 		if (proto == IPPROTO_TCP)
 			sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
 					    src6, tuple->ipv6.sport,
-					    dst6, hnum,
+					    dst6, ntohs(tuple->ipv6.dport),
 					    dif, sdif, &refcounted);
 		else if (likely(ipv6_bpf_stub))
 			sk = ipv6_bpf_stub->udp6_lib_lookup(net,
 							    src6, tuple->ipv6.sport,
-							    dst6, hnum,
+							    dst6, tuple->ipv6.dport,
 							    dif, sdif,
 							    &udp_table, skb);
 #endif
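
For context on item 3 above: per the hunk, the TCP lookup keeps the ntohs() conversion while the UDP lookup now receives the port exactly as stored in the BPF tuple, i.e. in network byte order. A tiny user-space illustration of how far apart the two representations are (the port number is chosen arbitrarily):

/* sketch-port-order.c: network vs host byte order for a port value */
#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	unsigned short dport_net = htons(4242);	/* as stored in the BPF tuple */

	/* Passing the wrong representation makes the socket lookup miss:
	 * on a little-endian machine 4242 (0x1092) shows up as 0x9210. */
	printf("network order raw: %u, host order: %u\n",
	       dport_net, ntohs(dport_net));
	return 0;
}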

View File

@@ -137,4 +137,10 @@ EXAMPLES
 
 SEE ALSO
 ========
-	**bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
+	**bpf**\ (2),
+	**bpf-helpers**\ (7),
+	**bpftool**\ (8),
+	**bpftool-prog**\ (8),
+	**bpftool-map**\ (8),
+	**bpftool-net**\ (8),
+	**bpftool-perf**\ (8)

View File

@@ -171,4 +171,10 @@ The following three commands are equivalent:
 
 SEE ALSO
 ========
-	**bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
+	**bpf**\ (2),
+	**bpf-helpers**\ (7),
+	**bpftool**\ (8),
+	**bpftool-prog**\ (8),
+	**bpftool-cgroup**\ (8),
+	**bpftool-net**\ (8),
+	**bpftool-perf**\ (8)

View File

@@ -136,4 +136,10 @@ EXAMPLES
 
 SEE ALSO
 ========
-	**bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
+	**bpf**\ (2),
+	**bpf-helpers**\ (7),
+	**bpftool**\ (8),
+	**bpftool-prog**\ (8),
+	**bpftool-map**\ (8),
+	**bpftool-cgroup**\ (8),
+	**bpftool-perf**\ (8)

View File

@@ -78,4 +78,10 @@ EXAMPLES
 
 SEE ALSO
 ========
-	**bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
+	**bpf**\ (2),
+	**bpf-helpers**\ (7),
+	**bpftool**\ (8),
+	**bpftool-prog**\ (8),
+	**bpftool-map**\ (8),
+	**bpftool-cgroup**\ (8),
+	**bpftool-net**\ (8)

View File

@@ -124,7 +124,8 @@ OPTIONS
 		  Generate human-readable JSON output. Implies **-j**.
 
 	-f, --bpffs
-		  Show file names of pinned programs.
+		  When showing BPF programs, show file names of pinned
+		  programs.
 
 EXAMPLES
 ========
@@ -206,4 +207,10 @@ EXAMPLES
 
 SEE ALSO
 ========
-	**bpftool**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8)
+	**bpf**\ (2),
+	**bpf-helpers**\ (7),
+	**bpftool**\ (8),
+	**bpftool-map**\ (8),
+	**bpftool-cgroup**\ (8),
+	**bpftool-net**\ (8),
+	**bpftool-perf**\ (8)

View File

@@ -63,5 +63,10 @@ OPTIONS
 
 SEE ALSO
 ========
-	**bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
-	**bpftool-perf**\ (8), **bpftool-net**\ (8)
+	**bpf**\ (2),
+	**bpf-helpers**\ (7),
+	**bpftool-prog**\ (8),
+	**bpftool-map**\ (8),
+	**bpftool-cgroup**\ (8),
+	**bpftool-net**\ (8),
+	**bpftool-perf**\ (8)

View File

@@ -130,16 +130,17 @@ static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
 	return 0;
 }
 
-int open_obj_pinned(char *path)
+int open_obj_pinned(char *path, bool quiet)
 {
 	int fd;
 
 	fd = bpf_obj_get(path);
 	if (fd < 0) {
-		p_err("bpf obj get (%s): %s", path,
-		      errno == EACCES && !is_bpffs(dirname(path)) ?
-		    "directory not in bpf file system (bpffs)" :
-		    strerror(errno));
+		if (!quiet)
+			p_err("bpf obj get (%s): %s", path,
+			      errno == EACCES && !is_bpffs(dirname(path)) ?
+			    "directory not in bpf file system (bpffs)" :
+			    strerror(errno));
 		return -1;
 	}
 
@@ -151,7 +152,7 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
 	enum bpf_obj_type type;
 	int fd;
 
-	fd = open_obj_pinned(path);
+	fd = open_obj_pinned(path, false);
 	if (fd < 0)
 		return -1;
 
@@ -304,7 +305,7 @@ char *get_fdinfo(int fd, const char *key)
 		return NULL;
 	}
 
-	while ((n = getline(&line, &line_n, fdi))) {
+	while ((n = getline(&line, &line_n, fdi)) > 0) {
 		char *value;
 		int len;
 
@@ -384,7 +385,7 @@ int build_pinned_obj_table(struct pinned_obj_table *tab,
 	while ((ftse = fts_read(fts))) {
 		if (!(ftse->fts_info & FTS_F))
 			continue;
-		fd = open_obj_pinned(ftse->fts_path);
+		fd = open_obj_pinned(ftse->fts_path, true);
 		if (fd < 0)
 			continue;
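
For context on item 4 above (the get_fdinfo() infinite loop): getline() returns -1 at end of file or on error, never 0 for a normal line, so a loop that only tests the return value for non-zero can spin forever. A stand-alone user-space sketch of the corrected loop shape:

/* sketch-getline.c: why the loop condition must be "> 0" */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/self/status", "r");
	char *line = NULL;
	size_t line_n = 0;
	ssize_t n;

	if (!f)
		return 1;

	/* getline() returns -1 at EOF or on error, so "while (getline(...))"
	 * never terminates once the file ends; comparing with > 0 does. */
	while ((n = getline(&line, &line_n, f)) > 0)
		fputs(line, stdout);

	free(line);
	fclose(f);
	return 0;
}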

View File

@@ -127,7 +127,7 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv,
 int get_fd_type(int fd);
 const char *get_fd_type_name(enum bpf_obj_type type);
 char *get_fdinfo(int fd, const char *key);
-int open_obj_pinned(char *path);
+int open_obj_pinned(char *path, bool quiet);
 int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
 int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
 int do_pin_fd(int fd, const char *name);

View File

@@ -357,10 +357,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
 	if (!hash_empty(prog_table.table)) {
 		struct pinned_obj *obj;
 
-		printf("\n");
 		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
 			if (obj->id == info->id)
-				printf("\tpinned %s\n", obj->path);
+				printf("\n\tpinned %s", obj->path);
 		}
 	}
 
@@ -845,6 +844,7 @@ static int do_load(int argc, char **argv)
 			}
 			NEXT_ARG();
 		} else if (is_prefix(*argv, "map")) {
+			void *new_map_replace;
 			char *endptr, *name;
 			int fd;
 
@@ -878,12 +878,15 @@ static int do_load(int argc, char **argv)
 			if (fd < 0)
 				goto err_free_reuse_maps;
 
-			map_replace = reallocarray(map_replace, old_map_fds + 1,
-						   sizeof(*map_replace));
-			if (!map_replace) {
+			new_map_replace = reallocarray(map_replace,
+						       old_map_fds + 1,
+						       sizeof(*map_replace));
+			if (!new_map_replace) {
 				p_err("mem alloc failed");
 				goto err_free_reuse_maps;
 			}
+			map_replace = new_map_replace;
+
 			map_replace[old_map_fds].idx = idx;
 			map_replace[old_map_fds].name = name;
 			map_replace[old_map_fds].fd = fd;
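
For context on item 7 above: assigning reallocarray()'s result straight back to the only pointer loses the old array on failure and leaves a NULL behind for the cleanup path. A stand-alone sketch of the temporary-pointer pattern used in the fix (the struct and field names are made up; assumes a libc that provides reallocarray()):

/* sketch-reallocarray.c: grow an array without losing it on failure */
#define _GNU_SOURCE		/* reallocarray() needs a recent glibc */
#include <stdio.h>
#include <stdlib.h>

struct map_entry { int idx; int fd; };	/* made-up stand-in for the real type */

int main(void)
{
	struct map_entry *tab = NULL, *new_tab;
	size_t n = 0;

	/* Grow via a temporary: on failure reallocarray() returns NULL but
	 * leaves the old allocation untouched, so 'tab' is still valid and
	 * the cleanup path can free it instead of dereferencing NULL. */
	new_tab = reallocarray(tab, n + 1, sizeof(*tab));
	if (!new_tab) {
		perror("reallocarray");
		free(tab);
		return 1;
	}
	tab = new_tab;
	tab[n].idx = 0;
	tab[n].fd = -1;
	n++;

	printf("entries: %zu\n", n);
	free(tab);
	return 0;
}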

View File

@@ -0,0 +1,612 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_PKT_CLS_H
#define __LINUX_PKT_CLS_H
#include <linux/types.h>
#include <linux/pkt_sched.h>
#define TC_COOKIE_MAX_SIZE 16
/* Action attributes */
enum {
TCA_ACT_UNSPEC,
TCA_ACT_KIND,
TCA_ACT_OPTIONS,
TCA_ACT_INDEX,
TCA_ACT_STATS,
TCA_ACT_PAD,
TCA_ACT_COOKIE,
__TCA_ACT_MAX
};
#define TCA_ACT_MAX __TCA_ACT_MAX
#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
#define TCA_ACT_MAX_PRIO 32
#define TCA_ACT_BIND 1
#define TCA_ACT_NOBIND 0
#define TCA_ACT_UNBIND 1
#define TCA_ACT_NOUNBIND 0
#define TCA_ACT_REPLACE 1
#define TCA_ACT_NOREPLACE 0
#define TC_ACT_UNSPEC (-1)
#define TC_ACT_OK 0
#define TC_ACT_RECLASSIFY 1
#define TC_ACT_SHOT 2
#define TC_ACT_PIPE 3
#define TC_ACT_STOLEN 4
#define TC_ACT_QUEUED 5
#define TC_ACT_REPEAT 6
#define TC_ACT_REDIRECT 7
#define TC_ACT_TRAP 8 /* For hw path, this means "trap to cpu"
* and don't further process the frame
* in hardware. For sw path, this is
* equivalent of TC_ACT_STOLEN - drop
* the skb and act like everything
* is alright.
*/
#define TC_ACT_VALUE_MAX TC_ACT_TRAP
/* There is a special kind of actions called "extended actions",
* which need a value parameter. These have a local opcode located in
* the highest nibble, starting from 1. The rest of the bits
* are used to carry the value. These two parts together make
* a combined opcode.
*/
#define __TC_ACT_EXT_SHIFT 28
#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK))
#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode)
#define TC_ACT_JUMP __TC_ACT_EXT(1)
#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
#define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN
/* Action type identifiers*/
enum {
TCA_ID_UNSPEC=0,
TCA_ID_POLICE=1,
/* other actions go here */
__TCA_ID_MAX=255
};
#define TCA_ID_MAX __TCA_ID_MAX
struct tc_police {
__u32 index;
int action;
#define TC_POLICE_UNSPEC TC_ACT_UNSPEC
#define TC_POLICE_OK TC_ACT_OK
#define TC_POLICE_RECLASSIFY TC_ACT_RECLASSIFY
#define TC_POLICE_SHOT TC_ACT_SHOT
#define TC_POLICE_PIPE TC_ACT_PIPE
__u32 limit;
__u32 burst;
__u32 mtu;
struct tc_ratespec rate;
struct tc_ratespec peakrate;
int refcnt;
int bindcnt;
__u32 capab;
};
struct tcf_t {
__u64 install;
__u64 lastuse;
__u64 expires;
__u64 firstuse;
};
struct tc_cnt {
int refcnt;
int bindcnt;
};
#define tc_gen \
__u32 index; \
__u32 capab; \
int action; \
int refcnt; \
int bindcnt
enum {
TCA_POLICE_UNSPEC,
TCA_POLICE_TBF,
TCA_POLICE_RATE,
TCA_POLICE_PEAKRATE,
TCA_POLICE_AVRATE,
TCA_POLICE_RESULT,
TCA_POLICE_TM,
TCA_POLICE_PAD,
__TCA_POLICE_MAX
#define TCA_POLICE_RESULT TCA_POLICE_RESULT
};
#define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
/* tca flags definitions */
#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) /* don't offload filter to HW */
#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) /* don't use filter in SW */
#define TCA_CLS_FLAGS_IN_HW (1 << 2) /* filter is offloaded to HW */
#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */
#define TCA_CLS_FLAGS_VERBOSE (1 << 4) /* verbose logging */
/* U32 filters */
#define TC_U32_HTID(h) ((h)&0xFFF00000)
#define TC_U32_USERHTID(h) (TC_U32_HTID(h)>>20)
#define TC_U32_HASH(h) (((h)>>12)&0xFF)
#define TC_U32_NODE(h) ((h)&0xFFF)
#define TC_U32_KEY(h) ((h)&0xFFFFF)
#define TC_U32_UNSPEC 0
#define TC_U32_ROOT (0xFFF00000)
enum {
TCA_U32_UNSPEC,
TCA_U32_CLASSID,
TCA_U32_HASH,
TCA_U32_LINK,
TCA_U32_DIVISOR,
TCA_U32_SEL,
TCA_U32_POLICE,
TCA_U32_ACT,
TCA_U32_INDEV,
TCA_U32_PCNT,
TCA_U32_MARK,
TCA_U32_FLAGS,
TCA_U32_PAD,
__TCA_U32_MAX
};
#define TCA_U32_MAX (__TCA_U32_MAX - 1)
struct tc_u32_key {
__be32 mask;
__be32 val;
int off;
int offmask;
};
struct tc_u32_sel {
unsigned char flags;
unsigned char offshift;
unsigned char nkeys;
__be16 offmask;
__u16 off;
short offoff;
short hoff;
__be32 hmask;
struct tc_u32_key keys[0];
};
struct tc_u32_mark {
__u32 val;
__u32 mask;
__u32 success;
};
struct tc_u32_pcnt {
__u64 rcnt;
__u64 rhit;
__u64 kcnts[0];
};
/* Flags */
#define TC_U32_TERMINAL 1
#define TC_U32_OFFSET 2
#define TC_U32_VAROFFSET 4
#define TC_U32_EAT 8
#define TC_U32_MAXDEPTH 8
/* RSVP filter */
enum {
TCA_RSVP_UNSPEC,
TCA_RSVP_CLASSID,
TCA_RSVP_DST,
TCA_RSVP_SRC,
TCA_RSVP_PINFO,
TCA_RSVP_POLICE,
TCA_RSVP_ACT,
__TCA_RSVP_MAX
};
#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 )
struct tc_rsvp_gpi {
__u32 key;
__u32 mask;
int offset;
};
struct tc_rsvp_pinfo {
struct tc_rsvp_gpi dpi;
struct tc_rsvp_gpi spi;
__u8 protocol;
__u8 tunnelid;
__u8 tunnelhdr;
__u8 pad;
};
/* ROUTE filter */
enum {
TCA_ROUTE4_UNSPEC,
TCA_ROUTE4_CLASSID,
TCA_ROUTE4_TO,
TCA_ROUTE4_FROM,
TCA_ROUTE4_IIF,
TCA_ROUTE4_POLICE,
TCA_ROUTE4_ACT,
__TCA_ROUTE4_MAX
};
#define TCA_ROUTE4_MAX (__TCA_ROUTE4_MAX - 1)
/* FW filter */
enum {
TCA_FW_UNSPEC,
TCA_FW_CLASSID,
TCA_FW_POLICE,
TCA_FW_INDEV, /* used by CONFIG_NET_CLS_IND */
TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */
TCA_FW_MASK,
__TCA_FW_MAX
};
#define TCA_FW_MAX (__TCA_FW_MAX - 1)
/* TC index filter */
enum {
TCA_TCINDEX_UNSPEC,
TCA_TCINDEX_HASH,
TCA_TCINDEX_MASK,
TCA_TCINDEX_SHIFT,
TCA_TCINDEX_FALL_THROUGH,
TCA_TCINDEX_CLASSID,
TCA_TCINDEX_POLICE,
TCA_TCINDEX_ACT,
__TCA_TCINDEX_MAX
};
#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1)
/* Flow filter */
enum {
FLOW_KEY_SRC,
FLOW_KEY_DST,
FLOW_KEY_PROTO,
FLOW_KEY_PROTO_SRC,
FLOW_KEY_PROTO_DST,
FLOW_KEY_IIF,
FLOW_KEY_PRIORITY,
FLOW_KEY_MARK,
FLOW_KEY_NFCT,
FLOW_KEY_NFCT_SRC,
FLOW_KEY_NFCT_DST,
FLOW_KEY_NFCT_PROTO_SRC,
FLOW_KEY_NFCT_PROTO_DST,
FLOW_KEY_RTCLASSID,
FLOW_KEY_SKUID,
FLOW_KEY_SKGID,
FLOW_KEY_VLAN_TAG,
FLOW_KEY_RXHASH,
__FLOW_KEY_MAX,
};
#define FLOW_KEY_MAX (__FLOW_KEY_MAX - 1)
enum {
FLOW_MODE_MAP,
FLOW_MODE_HASH,
};
enum {
TCA_FLOW_UNSPEC,
TCA_FLOW_KEYS,
TCA_FLOW_MODE,
TCA_FLOW_BASECLASS,
TCA_FLOW_RSHIFT,
TCA_FLOW_ADDEND,
TCA_FLOW_MASK,
TCA_FLOW_XOR,
TCA_FLOW_DIVISOR,
TCA_FLOW_ACT,
TCA_FLOW_POLICE,
TCA_FLOW_EMATCHES,
TCA_FLOW_PERTURB,
__TCA_FLOW_MAX
};
#define TCA_FLOW_MAX (__TCA_FLOW_MAX - 1)
/* Basic filter */
enum {
TCA_BASIC_UNSPEC,
TCA_BASIC_CLASSID,
TCA_BASIC_EMATCHES,
TCA_BASIC_ACT,
TCA_BASIC_POLICE,
__TCA_BASIC_MAX
};
#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1)
/* Cgroup classifier */
enum {
TCA_CGROUP_UNSPEC,
TCA_CGROUP_ACT,
TCA_CGROUP_POLICE,
TCA_CGROUP_EMATCHES,
__TCA_CGROUP_MAX,
};
#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
/* BPF classifier */
#define TCA_BPF_FLAG_ACT_DIRECT (1 << 0)
enum {
TCA_BPF_UNSPEC,
TCA_BPF_ACT,
TCA_BPF_POLICE,
TCA_BPF_CLASSID,
TCA_BPF_OPS_LEN,
TCA_BPF_OPS,
TCA_BPF_FD,
TCA_BPF_NAME,
TCA_BPF_FLAGS,
TCA_BPF_FLAGS_GEN,
TCA_BPF_TAG,
TCA_BPF_ID,
__TCA_BPF_MAX,
};
#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
/* Flower classifier */
enum {
TCA_FLOWER_UNSPEC,
TCA_FLOWER_CLASSID,
TCA_FLOWER_INDEV,
TCA_FLOWER_ACT,
TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
TCA_FLOWER_KEY_IP_PROTO, /* u8 */
TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
TCA_FLOWER_KEY_IPV4_DST, /* be32 */
TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
TCA_FLOWER_KEY_TCP_SRC, /* be16 */
TCA_FLOWER_KEY_TCP_DST, /* be16 */
TCA_FLOWER_KEY_UDP_SRC, /* be16 */
TCA_FLOWER_KEY_UDP_DST, /* be16 */
TCA_FLOWER_FLAGS,
TCA_FLOWER_KEY_VLAN_ID, /* be16 */
TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
TCA_FLOWER_KEY_ENC_KEY_ID, /* be32 */
TCA_FLOWER_KEY_ENC_IPV4_SRC, /* be32 */
TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
TCA_FLOWER_KEY_ENC_IPV4_DST, /* be32 */
TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
TCA_FLOWER_KEY_ENC_IPV6_SRC, /* struct in6_addr */
TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
TCA_FLOWER_KEY_ENC_IPV6_DST, /* struct in6_addr */
TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
TCA_FLOWER_KEY_TCP_SRC_MASK, /* be16 */
TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */
TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */
TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */
TCA_FLOWER_KEY_SCTP_SRC_MASK, /* be16 */
TCA_FLOWER_KEY_SCTP_DST_MASK, /* be16 */
TCA_FLOWER_KEY_SCTP_SRC, /* be16 */
TCA_FLOWER_KEY_SCTP_DST, /* be16 */
TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, /* be16 */
TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, /* be16 */
TCA_FLOWER_KEY_ENC_UDP_DST_PORT, /* be16 */
TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, /* be16 */
TCA_FLOWER_KEY_FLAGS, /* be32 */
TCA_FLOWER_KEY_FLAGS_MASK, /* be32 */
TCA_FLOWER_KEY_ICMPV4_CODE, /* u8 */
TCA_FLOWER_KEY_ICMPV4_CODE_MASK,/* u8 */
TCA_FLOWER_KEY_ICMPV4_TYPE, /* u8 */
TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,/* u8 */
TCA_FLOWER_KEY_ICMPV6_CODE, /* u8 */
TCA_FLOWER_KEY_ICMPV6_CODE_MASK,/* u8 */
TCA_FLOWER_KEY_ICMPV6_TYPE, /* u8 */
TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */
TCA_FLOWER_KEY_ARP_SIP, /* be32 */
TCA_FLOWER_KEY_ARP_SIP_MASK, /* be32 */
TCA_FLOWER_KEY_ARP_TIP, /* be32 */
TCA_FLOWER_KEY_ARP_TIP_MASK, /* be32 */
TCA_FLOWER_KEY_ARP_OP, /* u8 */
TCA_FLOWER_KEY_ARP_OP_MASK, /* u8 */
TCA_FLOWER_KEY_ARP_SHA, /* ETH_ALEN */
TCA_FLOWER_KEY_ARP_SHA_MASK, /* ETH_ALEN */
TCA_FLOWER_KEY_ARP_THA, /* ETH_ALEN */
TCA_FLOWER_KEY_ARP_THA_MASK, /* ETH_ALEN */
TCA_FLOWER_KEY_MPLS_TTL, /* u8 - 8 bits */
TCA_FLOWER_KEY_MPLS_BOS, /* u8 - 1 bit */
TCA_FLOWER_KEY_MPLS_TC, /* u8 - 3 bits */
TCA_FLOWER_KEY_MPLS_LABEL, /* be32 - 20 bits */
TCA_FLOWER_KEY_TCP_FLAGS, /* be16 */
TCA_FLOWER_KEY_TCP_FLAGS_MASK, /* be16 */
TCA_FLOWER_KEY_IP_TOS, /* u8 */
TCA_FLOWER_KEY_IP_TOS_MASK, /* u8 */
TCA_FLOWER_KEY_IP_TTL, /* u8 */
TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */
TCA_FLOWER_KEY_CVLAN_ID, /* be16 */
TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */
TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */
TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */
TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */
TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */
TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */
TCA_FLOWER_KEY_ENC_OPTS,
TCA_FLOWER_KEY_ENC_OPTS_MASK,
TCA_FLOWER_IN_HW_COUNT,
__TCA_FLOWER_MAX,
};
#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
enum {
TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested
* TCA_FLOWER_KEY_ENC_OPT_GENEVE_
* attributes
*/
__TCA_FLOWER_KEY_ENC_OPTS_MAX,
};
#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1)
enum {
TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, /* u16 */
TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
};
#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \
(__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
enum {
TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
/* Match-all classifier */
enum {
TCA_MATCHALL_UNSPEC,
TCA_MATCHALL_CLASSID,
TCA_MATCHALL_ACT,
TCA_MATCHALL_FLAGS,
__TCA_MATCHALL_MAX,
};
#define TCA_MATCHALL_MAX (__TCA_MATCHALL_MAX - 1)
/* Extended Matches */
struct tcf_ematch_tree_hdr {
__u16 nmatches;
__u16 progid;
};
enum {
TCA_EMATCH_TREE_UNSPEC,
TCA_EMATCH_TREE_HDR,
TCA_EMATCH_TREE_LIST,
__TCA_EMATCH_TREE_MAX
};
#define TCA_EMATCH_TREE_MAX (__TCA_EMATCH_TREE_MAX - 1)
struct tcf_ematch_hdr {
__u16 matchid;
__u16 kind;
__u16 flags;
__u16 pad; /* currently unused */
};
/* 0 1
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
* +-----------------------+-+-+---+
* | Unused |S|I| R |
* +-----------------------+-+-+---+
*
* R(2) ::= relation to next ematch
* where: 0 0 END (last ematch)
* 0 1 AND
* 1 0 OR
* 1 1 Unused (invalid)
* I(1) ::= invert result
* S(1) ::= simple payload
*/
#define TCF_EM_REL_END 0
#define TCF_EM_REL_AND (1<<0)
#define TCF_EM_REL_OR (1<<1)
#define TCF_EM_INVERT (1<<2)
#define TCF_EM_SIMPLE (1<<3)
#define TCF_EM_REL_MASK 3
#define TCF_EM_REL_VALID(v) (((v) & TCF_EM_REL_MASK) != TCF_EM_REL_MASK)
enum {
TCF_LAYER_LINK,
TCF_LAYER_NETWORK,
TCF_LAYER_TRANSPORT,
__TCF_LAYER_MAX
};
#define TCF_LAYER_MAX (__TCF_LAYER_MAX - 1)
/* Ematch type assignments
* 1..32767 Reserved for ematches inside kernel tree
* 32768..65535 Free to use, not reliable
*/
#define TCF_EM_CONTAINER 0
#define TCF_EM_CMP 1
#define TCF_EM_NBYTE 2
#define TCF_EM_U32 3
#define TCF_EM_META 4
#define TCF_EM_TEXT 5
#define TCF_EM_VLAN 6
#define TCF_EM_CANID 7
#define TCF_EM_IPSET 8
#define TCF_EM_IPT 9
#define TCF_EM_MAX 9
enum {
TCF_EM_PROG_TC
};
enum {
TCF_EM_OPND_EQ,
TCF_EM_OPND_GT,
TCF_EM_OPND_LT
};
#endif

View File

@@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __LINUX_TC_BPF_H
#define __LINUX_TC_BPF_H
#include <linux/pkt_cls.h>
#define TCA_ACT_BPF 13
struct tc_act_bpf {
tc_gen;
};
enum {
TCA_ACT_BPF_UNSPEC,
TCA_ACT_BPF_TM,
TCA_ACT_BPF_PARMS,
TCA_ACT_BPF_OPS_LEN,
TCA_ACT_BPF_OPS,
TCA_ACT_BPF_FD,
TCA_ACT_BPF_NAME,
TCA_ACT_BPF_PAD,
TCA_ACT_BPF_TAG,
TCA_ACT_BPF_ID,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
#endif

View File

@@ -81,7 +81,10 @@ int main(int argc, char **argv)
 		goto err;
 	}
 
-	assert(system("ping localhost -6 -c 10000 -f -q > /dev/null") == 0);
+	if (system("which ping6 &>/dev/null") == 0)
+		assert(!system("ping6 localhost -c 10000 -f -q > /dev/null"));
+	else
+		assert(!system("ping -6 localhost -c 10000 -f -q > /dev/null"));
 
 	if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
 			   &prog_cnt)) {

View File

@@ -13896,6 +13896,25 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.result = ACCEPT,
 	},
+	{
+		"calls: ctx read at start of subprog",
+		.insns = {
+		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+		BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
+		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+		BPF_EXIT_INSN(),
+		BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)