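/*
 * Tests for direct packet access from XDP programs.  Each entry checks
 * how the verifier propagates range knowledge from a pkt_data/pkt_end
 * (or pkt_meta/pkt_data) comparison to a subsequent load.  First:
 * pointer arithmetic on pkt_end itself must be rejected outright.
 */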
{
	"XDP pkt read, pkt_end mangling, bad access 1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R3 pointer arithmetic on pkt_end",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"XDP pkt read, pkt_end mangling, bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R3 pointer arithmetic on pkt_end",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
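/*
 * pkt_data' > pkt_end: "if (data + N > data_end) goto out" guards the
 * fall-through, proving N valid bytes there.  The corner case for an
 * 8-byte (DW) read at offset -N is N == 8; the +1/-1 variants probe
 * one byte on either side of that boundary.
 */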
{
	"XDP pkt read, pkt_data' > pkt_end, corner case, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' > pkt_end, bad access 1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' > pkt_end, bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' > pkt_end, corner case +1, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
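/*
 * pkt_end > pkt_data': the taken branch is now the safe one, so a
 * BPF_JA on the fall-through skips the load.  data_end > data + N
 * proves N + 1 valid bytes, making N == 7 the corner case for a DW
 * read.
 */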
{
	"XDP pkt read, pkt_end > pkt_data', good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
/* The "corner case -1" tests in this file were adjusted by commit
 * "bpf: Fix the off-by-two error in range markings":
 *
 * The first commit cited below attempts to fix the off-by-one error that
 * appeared in some comparisons with an open range.  Due to this error,
 * arithmetically equivalent pieces of code could get different verdicts
 * from the verifier, for example (pseudocode):
 *
 *   // 1. Passes the verifier:
 *   if (data + 8 > data_end)
 *           return early
 *   read *(u64 *)data, i.e. [data; data+7]
 *
 *   // 2. Rejected by the verifier (should still pass):
 *   if (data + 7 >= data_end)
 *           return early
 *   read *(u64 *)data, i.e. [data; data+7]
 *
 * The attempted fix, however, shifted the range by one in the wrong
 * direction, so not only did the bug remain, but such a piece of code
 * also started failing in the verifier:
 *
 *   // 3. Rejected by the verifier, but the check is stricter than in #1.
 *   if (data + 8 >= data_end)
 *           return early
 *   read *(u64 *)data, i.e. [data; data+7]
 *
 * That change converted an off-by-one bug into an off-by-two one.  The
 * second commit cited below added BPF selftests written to ensure that
 * code chunks like #3 are rejected; however, they should be accepted.
 * The off-by-two fix adjusted new_range in the right direction and
 * fixed the tests by changing the range into the one that should
 * actually fail.
 *
 * Fixes: fb2a311a31d3 ("bpf: fix off by one for range markings with L{T, E} patterns")
 * Fixes: b37242c773b2 ("bpf: add test cases to bpf selftests to cover all access tests")
 * Link: https://lore.kernel.org/bpf/20211130181607.593149-1-maximmi@nvidia.com
 */
{
	"XDP pkt read, pkt_end > pkt_data', corner case -1, bad access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end > pkt_data', bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end > pkt_data', corner case, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end > pkt_data', corner case +1, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
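/*
 * pkt_data' < pkt_end: the same open range as pkt_end > pkt_data',
 * with the operands swapped; data + N < data_end again proves N + 1
 * valid bytes on the taken branch.
 */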
{
	"XDP pkt read, pkt_data' < pkt_end, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
	BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' < pkt_end, bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' < pkt_end, corner case, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' < pkt_end, corner case +1, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
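/*
 * pkt_end < pkt_data': the fall-through is the safe path here
 * (data_end >= data + N, i.e. N valid bytes), so no BPF_JA is needed
 * and N == 8 is the DW corner case.
 */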
{
	"XDP pkt read, pkt_end < pkt_data', corner case, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end < pkt_data', bad access 1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end < pkt_data', bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end < pkt_data', corner case +1, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
	BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end < pkt_data', corner case -1, bad access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
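/*
 * pkt_data' >= pkt_end: "if (data + N >= data_end) goto out" leaves
 * data + N < data_end on the fall-through, i.e. N + 1 valid bytes,
 * so the DW corner case is N == 7.
 */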
{
	"XDP pkt read, pkt_data' >= pkt_end, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
	BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' >= pkt_end, corner case, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
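/*
 * pkt_end >= pkt_data': the taken branch proves data_end >= data + N
 * (N valid bytes) and a BPF_JA guards the fall-through; N == 8 is the
 * DW corner case.
 */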
{
	"XDP pkt read, pkt_end >= pkt_data', corner case, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end >= pkt_data', bad access 1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end >= pkt_data', bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end >= pkt_data', corner case +1, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
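/*
 * pkt_data' <= pkt_end: taken when data + N <= data_end, proving N
 * valid bytes, again with a BPF_JA guarding the fall-through; N == 8
 * is the DW corner case.
 */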
{
	"XDP pkt read, pkt_data' <= pkt_end, corner case, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
	BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
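/*
 * pkt_end <= pkt_data': "if (data_end <= data + N) goto out" leaves
 * data_end > data + N on the fall-through, i.e. N + 1 valid bytes,
 * so the DW corner case is N == 7.
 */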
{
	"XDP pkt read, pkt_end <= pkt_data', good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
	BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end <= pkt_data', bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end <= pkt_data', corner case, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_end <= pkt_data', corner case +1, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
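/*
 * The remaining tests repeat the same comparison patterns for the
 * metadata area: pkt_meta' is compared against pkt_data instead of
 * pkt_data' against pkt_end, starting with pkt_meta' > pkt_data.
 */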
{
	"XDP pkt read, pkt_meta' > pkt_data, corner case, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
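/*
 * pkt_data > pkt_meta': taken-branch-safe variant for the metadata
 * area, mirroring pkt_end > pkt_data' (a BPF_JA skips the load on the
 * fall-through).
 */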
{
	"XDP pkt read, pkt_data > pkt_meta', good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data > pkt_meta', bad access 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 offset is outside of the packet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data > pkt_meta', corner case, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"XDP pkt read, pkt_data > pkt_meta', corner case +1, good access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
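/*
 * pkt_meta' < pkt_data: open-range variant for the metadata area,
 * mirroring pkt_data' < pkt_end.
 */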
|
2019-01-25 23:24:44 +00:00
|
|
|
{
|
|
|
|
"XDP pkt read, pkt_meta' < pkt_data, good access",
|
|
|
|
.insns = {
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
|
|
|
|
offsetof(struct xdp_md, data_meta)),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
|
|
|
|
BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.result = ACCEPT,
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
|
|
|
|
},
/*
 * bpf: Fix the off-by-two error in range markings
 *
 * The first commit cited below attempts to fix the off-by-one error that
 * appeared in some comparisons with an open range. Due to this error,
 * arithmetically equivalent pieces of code could get different verdicts
 * from the verifier, for example (pseudocode):
 *
 *	// 1. Passes the verifier:
 *	if (data + 8 > data_end)
 *		return early
 *	read *(u64 *)data, i.e. [data; data+7]
 *
 *	// 2. Rejected by the verifier (should still pass):
 *	if (data + 7 >= data_end)
 *		return early
 *	read *(u64 *)data, i.e. [data; data+7]
 *
 * The attempted fix, however, shifts the range by one in the wrong
 * direction, so the bug not only remains, but such a piece of code also
 * starts failing in the verifier:
 *
 *	// 3. Rejected by the verifier, but the check is stricter than in #1.
 *	if (data + 8 >= data_end)
 *		return early
 *	read *(u64 *)data, i.e. [data; data+7]
 *
 * The change performed by that fix converted an off-by-one bug into an
 * off-by-two one. The second commit cited below added the BPF selftests
 * written to ensure that code chunks like #3 are rejected; however,
 * they should be accepted.
 *
 * This commit fixes the off-by-two error by adjusting new_range in the
 * right direction and fixes the tests by changing the range into one
 * that should actually fail.
 *
 * Fixes: fb2a311a31d3 ("bpf: fix off by one for range markings with L{T, E} patterns")
 * Fixes: b37242c773b2 ("bpf: add test cases to bpf selftests to cover all access tests")
 * Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
 * Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
 * Link: https://lore.kernel.org/bpf/20211130181607.593149-1-maximmi@nvidia.com
 */
{
"XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
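/*
 * For orientation, a minimal restricted-C sketch of what the
 * "corner case -1" insns above encode (a hypothetical XDP program,
 * not part of these vectors; names are illustrative):
 *
 *	SEC("xdp")
 *	int meta_corner_case(struct xdp_md *ctx)
 *	{
 *		void *meta = (void *)(long)ctx->data_meta;
 *		void *data = (void *)(long)ctx->data;
 *
 *		if (meta + 6 < data)		// proves only [meta; meta+5]
 *			return *(__u64 *)meta != 0;	// reads [meta; meta+7]: rejected
 *		return XDP_PASS;
 *	}
 */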
{
"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' < pkt_data, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data < pkt_meta', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data < pkt_meta', bad access 1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data < pkt_meta', bad access 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data < pkt_meta', corner case +1, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' >= pkt_data, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
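/*
 * Note the asymmetry with the strict comparisons: here BPF_JGE branches
 * away when meta + 6 >= data, so the 8-byte load runs only under
 * meta + 6 < data, which proves just [meta; meta+5] and must be
 * rejected. The "corner case -1" variants in this family pin down
 * exactly the off-by-two range marking described in the commit message
 * kept above.
 */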
{
"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' >= pkt_data, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data >= pkt_meta', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' <= pkt_data, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data <= pkt_meta', good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data <= pkt_meta', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
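/*
 * The matching accepted pattern, again as an illustrative restricted-C
 * sketch (not part of the vectors): a strict comparison proves one byte
 * more than its non-strict counterpart, so the full 8-byte read passes.
 *
 *	if (meta + 7 < data)		// proves [meta; meta+7]
 *		v = *(__u64 *)meta;	// 8-byte read: accepted
 */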