Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2021-08-07

The following pull-request contains BPF updates for your *net* tree.

We've added 4 non-merge commits during the last 9 day(s) which contain
a total of 4 files changed, 8 insertions(+), 7 deletions(-).

The main changes are:

1) Fix integer overflow in htab's lookup + delete batch op, from
   Tatsuhiko Yasumatsu.

2) Fix invalid fd 0 close in libbpf if BTF parsing failed, from
   Daniel Xu.

3) Fix libbpf feature probe for BPF_PROG_TYPE_CGROUP_SOCKOPT, from
   Robin Gögge.

4) Fix minor libbpf doc warning regarding code-block language, from
   Randy Dunlap.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 84103209ba
@@ -108,7 +108,7 @@ This bump in ABI version is at most once per kernel development cycle.
 
 For example, if current state of ``libbpf.map`` is:
 
-.. code-block:: c
+.. code-block:: none
 
         LIBBPF_0.0.1 {
                 global:
@@ -121,7 +121,7 @@ For example, if current state of ``libbpf.map`` is:
 , and a new symbol ``bpf_func_c`` is being introduced, then
 ``libbpf.map`` should be changed like this:
 
-.. code-block:: c
+.. code-block:: none
 
         LIBBPF_0.0.1 {
                 global:
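
The block re-tagged above holds a GNU linker version script rather than C source, which is why marking it ``c`` produced the documentation warning mentioned in item 4) of the pull request; ``none`` simply disables syntax highlighting. For illustration only (a rough sketch of such a stanza, with placeholder symbol names rather than the doc's exact text):

        LIBBPF_0.0.1 {
                global:
                        bpf_func_a;
                        bpf_func_b;
                local:
                        *;
        };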
@@ -1565,8 +1565,8 @@ alloc:
 	/* We cannot do copy_from_user or copy_to_user inside
 	 * the rcu_read_lock. Allocate enough space here.
 	 */
-	keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN);
-	values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN);
+	keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
+	values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
 	if (!keys || !values) {
 		ret = -ENOMEM;
 		goto after_loop;
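
This hunk is the integer-overflow fix from item 1) of the pull request: key_size and bucket_size are 32-bit quantities, so their plain product is computed in 32-bit arithmetic and can wrap before it ever reaches the allocator, yielding a buffer smaller than what the batch op later writes into. kvmalloc_array() performs an overflow-checked multiply and fails the allocation instead. A rough userspace sketch of the same idea (illustrative values and helper, not the kernel code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Overflow-checked sizing, same idea as kvmalloc_array(): refuse to
 * allocate when n * size does not fit, instead of silently wrapping. */
static void *alloc_array_checked(uint32_t n, uint32_t size)
{
	uint32_t bytes;

	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;		/* product wrapped: refuse */
	return malloc(bytes);
}

int main(void)
{
	uint32_t key_size = 0x10000, bucket_size = 0x10000;	/* illustrative */

	/* The naive 32-bit product wraps to 0, so a plain allocation of
	 * that size would "succeed" with a buffer far too small. */
	printf("naive size  : %u\n", key_size * bucket_size);
	printf("checked ptr : %p\n", alloc_array_checked(key_size, bucket_size));
	return 0;
}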
@@ -804,6 +804,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
 	btf->nr_types = 0;
 	btf->start_id = 1;
 	btf->start_str_off = 0;
+	btf->fd = -1;
 
 	if (base_btf) {
 		btf->base_btf = base_btf;
@@ -832,8 +833,6 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
 	if (err)
 		goto done;
 
-	btf->fd = -1;
-
 done:
 	if (err) {
 		btf__free(btf);
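
These two hunks move the btf->fd = -1 initialization from after parsing to immediately after allocation, which is the "invalid fd 0 close" fix from item 2) of the pull request: the struct is zero-initialized, so an error before the old assignment left fd at 0, and the cleanup path would then close() descriptor 0. A simplified sketch of the pattern (types and the parse step are hypothetical stand-ins, not libbpf's actual code):

#include <stdlib.h>
#include <unistd.h>

struct obj {
	int fd;			/* -1 means "no descriptor owned" */
};

/* hypothetical stand-in for the parsing that can fail early */
static int parse_header(const void *data)
{
	return data ? 0 : -1;
}

static void obj_free(struct obj *o)
{
	if (!o)
		return;
	if (o->fd >= 0)		/* fd 0 is a valid descriptor, so a stale 0 would be closed */
		close(o->fd);
	free(o);
}

static struct obj *obj_new(const void *data)
{
	struct obj *o = calloc(1, sizeof(*o));	/* leaves fd at 0 */

	if (!o)
		return NULL;
	o->fd = -1;			/* the fix: set the sentinel before any bail-out */

	if (parse_header(data)) {
		obj_free(o);		/* safe now; without the sentinel this closes fd 0 */
		return NULL;
	}
	return o;
}

int main(void)
{
	struct obj *o = obj_new(NULL);	/* force the early-failure path */

	obj_free(o);			/* o is NULL here; no-op */
	return 0;
}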
@@ -75,6 +75,9 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
 		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
 		break;
+	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+		xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
+		break;
 	case BPF_PROG_TYPE_SK_LOOKUP:
 		xattr.expected_attach_type = BPF_SK_LOOKUP;
 		break;
@@ -104,7 +107,6 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
 	case BPF_PROG_TYPE_SK_REUSEPORT:
 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
-	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
 	case BPF_PROG_TYPE_TRACING:
 	case BPF_PROG_TYPE_STRUCT_OPS:
 	case BPF_PROG_TYPE_EXT:
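
These hunks give BPF_PROG_TYPE_CGROUP_SOCKOPT an explicit expected_attach_type instead of leaving it in the default (zero) group: the kernel rejects a CGROUP_SOCKOPT program load without a valid attach type, so the feature probe reported the type as unsupported even on kernels that do support it (item 3) of the pull request). A rough usage sketch of the probe this fixes, assuming libbpf's pre-1.0 bpf_probe_prog_type() API:

#include <stdbool.h>
#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* second argument is an ifindex for offload probing; 0 = none */
	bool supported = bpf_probe_prog_type(BPF_PROG_TYPE_CGROUP_SOCKOPT, 0);

	printf("BPF_PROG_TYPE_CGROUP_SOCKOPT: %s\n",
	       supported ? "supported" : "not supported");
	return 0;
}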