/*
* Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
*
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
*/

#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../nfp_asm.h"
#include "fw.h"

/* For relocation logic use up-most byte of branch instruction as scratch
 * area. Remember to clear this before sending instructions to HW!
 */
#define OP_RELO_TYPE	0xff00000000000000ULL

enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO		15000
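
/* Illustrative sketch only -- the helper names below are hypothetical,
 * not part of the driver API.  They show how a relocation type could be
 * stashed in and recovered from the instruction's scratch byte via the
 * <linux/bitfield.h> helpers, and how that byte would be cleared again
 * before the instruction is sent to HW.
 */
static inline u64 nfp_insn_set_relo_sketch(u64 insn, enum nfp_relo_type relo)
{
	/* encode the relo type into the up-most (scratch) byte */
	return insn | FIELD_PREP(OP_RELO_TYPE, relo);
}

static inline enum nfp_relo_type nfp_insn_relo_sketch(u64 insn)
{
	/* recover the relo type from the scratch byte */
	return FIELD_GET(OP_RELO_TYPE, insn);
}

static inline u64 nfp_insn_clear_relo_sketch(u64 insn)
{
	/* scratch byte must be zero before the insn goes to HW */
	return insn & ~OP_RELO_TYPE;
}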

enum static_regs {
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};

enum pkt_vec {
	PKT_VEC_PKT_LEN		= 0,
	PKT_VEC_PKT_PTR		= 2,
};

#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)

#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imm_both(np)	reg_both(STATIC_REG_IMM)

#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define   NFP_BPF_ABI_FLAG_MARK	1

/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app: backpointer to the app
 *
 * @tag_allocator: bitmap of control message tags in use
 * @tag_alloc_next: next tag bit to allocate
 * @tag_alloc_last: next tag bit to be freed
 *
 * @cmsg_replies: received cmsg replies waiting to be consumed
 * @cmsg_wq: work queue for waiting for cmsg replies
 *
 * @map_list: list of offloaded maps
 * @maps_in_use: number of currently offloaded maps
 * @map_elems_in_use: number of elements allocated to offloaded maps
 *
 * @adjust_head: adjust head capability
 * @adjust_head.flags: extra flags for adjust head
 * @adjust_head.off_min: minimal packet offset within buffer required
 * @adjust_head.off_max: maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub: negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add: positive adjustment guaranteed possible
 *
 * @maps: map capability
 * @maps.types: supported map types
 * @maps.max_maps: max number of maps supported
 * @maps.max_elems: max number of entries in each map
 * @maps.max_key_sz: max size of map key
 * @maps.max_val_sz: max size of map value
 * @maps.max_elem_sz: max size of map entry (key + value)
 *
 * @helpers: helper addresses for various calls
 * @helpers.map_lookup: map lookup helper address
 */
struct nfp_app_bpf {
	struct nfp_app *app;

	DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
	u16 tag_alloc_next;
	u16 tag_alloc_last;

	struct sk_buff_head cmsg_replies;
	struct wait_queue_head cmsg_wq;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
	} helpers;
};
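
/* A minimal sketch (hypothetical helper; only fields from this header
 * and struct bpf_map are assumed): the kind of capability check an
 * offload request could perform against the limits advertised above.
 */
static inline bool
nfp_bpf_map_fits_caps_sketch(struct nfp_app_bpf *bpf,
			     const struct bpf_map *map)
{
	return map->key_size <= bpf->maps.max_key_sz &&
	       map->value_size <= bpf->maps.max_val_sz &&
	       bpf->maps_in_use < bpf->maps.max_maps &&
	       bpf->map_elems_in_use + map->max_entries <=
	       bpf->maps.max_elems;
}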

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap: pointer to the offloaded BPF map
 * @bpf: back pointer to bpf app private structure
 * @tid: table id identifying map on datapath
 * @l: link on the nfp_app_bpf->map_list list
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;
	struct list_head l;
};

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)
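
/* Illustrative only (hypothetical macro): with the single-entry
 * iterators above, a pass over the program is a plain
 * list_for_each_entry() walk, and nfp_meta_next() provides lookahead
 * when a pass needs the following instruction.
 */
#define nfp_for_each_insn_sketch(nfp_prog, meta)			\
	list_for_each_entry(meta, &(nfp_prog)->insns, l)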

#define FLAG_INSN_IS_JUMP_DST	BIT(0)
2016-09-21 10:44:00 +00:00
|
|
|
/**
|
|
|
|
* struct nfp_insn_meta - BPF instruction wrapper
|
|
|
|
* @insn: BPF instruction
|
2017-10-12 17:34:16 +00:00
|
|
|
* @ptr: pointer type for memory operations
|
2017-12-01 05:32:58 +00:00
|
|
|
* @ldst_gather_len: memcpy length gathered from load/store sequence
|
|
|
|
* @paired_st: the paired store insn at the head of the sequence
|
2017-10-23 18:58:13 +00:00
|
|
|
* @ptr_not_const: pointer is not always constant
|
2018-03-29 00:48:25 +00:00
|
|
|
* @pkt_cache: packet data cache information
|
|
|
|
* @pkt_cache.range_start: start offset for associated packet data cache
|
|
|
|
* @pkt_cache.range_end: end offset for associated packet data cache
|
|
|
|
* @pkt_cache.do_init: this read needs to initialize packet data cache
|
2017-12-01 05:32:50 +00:00
|
|
|
* @jmp_dst: destination info for jump instructions
|
2018-01-12 04:29:15 +00:00
|
|
|
* @func_id: function id for call instructions
|
|
|
|
* @arg1: arg1 for call instructions
|
|
|
|
* @arg2: arg2 for call instructions
|
|
|
|
* @arg2_var_off: arg2 changes stack offset on different paths
|
2016-09-21 10:44:00 +00:00
|
|
|
* @off: index of first generated machine instruction (in nfp_prog.prog)
|
|
|
|
* @n: eBPF instruction number
|
2017-12-01 05:32:51 +00:00
|
|
|
* @flags: eBPF instruction extra optimization flags
|
2016-09-21 10:44:00 +00:00
|
|
|
* @skip: skip this instruction (optimized out)
|
|
|
|
* @double_cb: callback for second part of the instruction
|
|
|
|
* @l: link on nfp_prog->insns list
|
|
|
|
*/
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* memory ops */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
		};
		/* jumps */
		struct nfp_insn_meta *jmp_dst;
		/* calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct bpf_reg_state arg2;
			bool arg2_var_off;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	bool skip;
	instr_cb_t double_cb;

	struct list_head l;
};

/* BPF_SIZE() bits of the opcode; masked off below so the checks match
 * loads/stores of any access size.
 */
#define BPF_SIZE_MASK	0x18

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
	return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
	return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
	return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
	return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_LD &&
	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}
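
/* Illustrative only (hypothetical helper): the decode helpers above
 * compose naturally; e.g. a conditional jump -- the kind the branch
 * fixup pass has to patch -- is any BPF_JMP-class insn that is not an
 * exit, a call, or an unconditional jump.
 */
static inline bool is_mbpf_cond_jump_sketch(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP && mbpf_op(meta) != BPF_EXIT &&
	       mbpf_op(meta) != BPF_CALL && mbpf_op(meta) != BPF_JA;
}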

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_depth: max stack depth from the verifier
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;

	unsigned int n_translated;
	int error;

	unsigned int stack_depth;
	unsigned int adjust_head_location;

	struct list_head insns;
};
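
/* A minimal sketch of the invariant the branch fixup relies on
 * (hypothetical helper; ignores insns optimized out via @skip): the
 * last NFP instruction JITed from @meta ends one before where the next
 * meta starts, except for the final translated instruction, whose
 * index is tracked in @last_bpf_off -- because of the special-purpose
 * epilogue sequences it is *not* simply prog_len - 1.
 */
static inline unsigned int
nfp_meta_last_off_sketch(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	if (meta == nfp_prog_last_meta(nfp_prog))
		return nfp_prog->last_bpf_off;
	return nfp_meta_next(meta)->off - 1;
}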

/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog: currently loaded cls_bpf program
 * @start_off: address of the first instruction in the memory
 * @tgt_done: jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};
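
/* Illustrative sketch (assumed semantics, hypothetical helper; assumes
 * br_set_offset() from nfp_asm.h): once the program's load address is
 * known, a per-vNIC relocation pass can point a RELO_BR_NEXT_PKT
 * branch at @tgt_done.  The real logic lives in nfp_bpf_relo_for_vnic().
 */
static inline void
nfp_relo_next_pkt_sketch(u64 *instr, struct nfp_bpf_vnic *bv)
{
	br_set_offset(instr, bv->tgt_done);
}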

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);
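
/* A minimal usage sketch (hypothetical wrapper, assumed semantics):
 * the ctrl API above is what an offloaded map operation ultimately
 * calls into; replies arrive via nfp_bpf_ctrl_msg_rx() and wake
 * waiters on nfp_app_bpf::cmsg_wq.
 */
static inline int
nfp_bpf_map_lookup_sketch(struct bpf_offloaded_map *offmap,
			  void *key, void *value)
{
	return nfp_bpf_ctrl_lookup_entry(offmap, key, value);
}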

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif
|