Mirror of https://github.com/torvalds/linux.git (synced 2024-11-11 06:31:49 +00:00)
bpf: Add bitwise atomic instructions
This adds instructions for

  atomic[64]_[fetch_]and
  atomic[64]_[fetch_]or
  atomic[64]_[fetch_]xor

All these operations are isomorphic enough to implement with the same
verifier, interpreter, and x86 JIT code, hence a single commit.

The main interesting thing here is that x86 doesn't directly support the
fetch_ version of these operations, so we need to generate a CMPXCHG loop
in the JIT. This requires two temporary registers; IIUC it's safe to use
BPF_REG_AX and x86's AUX_REG for this purpose.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-10-jackmanb@google.com
commit 981f94c3e9
parent 462910670e
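For context (not part of the commit), a minimal sketch of how one of the new instructions can be hand-assembled as a struct bpf_insn using the UAPI constants this series introduces; the register numbers and offset are made-up example values:

	#include <linux/bpf.h>	/* uapi: struct bpf_insn, BPF_* opcode bits */

	/*
	 * src_reg = atomic64_fetch_and(dst_reg + off, src_reg), encoded as
	 * BPF_STX | BPF_ATOMIC | BPF_DW with imm = BPF_AND | BPF_FETCH.
	 * r1/r2/off=0 are illustrative only.
	 */
	static const struct bpf_insn fetch_and64_insn = {
		.code    = BPF_STX | BPF_ATOMIC | BPF_DW,
		.dst_reg = BPF_REG_2,	/* pointer to the 64-bit memory operand */
		.src_reg = BPF_REG_1,	/* operand going in, old value coming out */
		.off     = 0,
		.imm     = BPF_AND | BPF_FETCH,
	};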
arch/x86/net/bpf_jit_comp.c

@@ -808,6 +808,10 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_SUB:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
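For the non-fetch forms handled above, the JIT emits a single lock-prefixed ALU instruction, so the whole read-modify-write is one atomic x86 operation. A rough hand-written C equivalent for a 64-bit BPF_OR (a sketch only; the helper name is made up):

	#include <linux/types.h>

	/* Roughly what the emitted code does: lock orq %reg, (mem) */
	static inline void lock_or64_sketch(u64 *addr, u64 val)
	{
		asm volatile("lock orq %1, %0"
			     : "+m" (*addr)
			     : "er" (val)
			     : "memory", "cc");
	}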
@@ -1292,8 +1296,52 @@ st:			if (is_imm8(insn->off))

		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH)) {
				u8 *branch_target;
				bool is64 = BPF_SIZE(insn->code) == BPF_DW;

				/*
				 * Can't be implemented with a single x86 insn.
				 * Need to do a CMPXCHG loop.
				 */

				/* Will need RAX as a CMPXCHG operand so save R0 */
				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
				branch_target = prog;
				/* Load old value */
				emit_ldx(&prog, BPF_SIZE(insn->code),
					 BPF_REG_0, dst_reg, insn->off);
				/*
				 * Perform the (commutative) operation locally,
				 * put the result in the AUX_REG.
				 */
				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
				maybe_emit_mod(&prog, AUX_REG, src_reg, is64);
				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
				      add_2reg(0xC0, AUX_REG, src_reg));
				/* Attempt to swap in new value */
				err = emit_atomic(&prog, BPF_CMPXCHG,
						  dst_reg, AUX_REG, insn->off,
						  BPF_SIZE(insn->code));
				if (WARN_ON(err))
					return err;
				/*
				 * ZF tells us whether we won the race. If it's
				 * cleared we need to try again.
				 */
				EMIT2(X86_JNE, -(prog - branch_target) - 2);
				/* Return the pre-modification value */
				emit_mov_reg(&prog, is64, src_reg, BPF_REG_0);
				/* Restore R0 after clobbering RAX */
				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
				break;

			}

			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
					  insn->off, BPF_SIZE(insn->code));
			if (err)
				return err;
			break;
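The fetch_ variants of and/or/xor have no single-instruction x86 encoding (unlike fetch_add, which maps to XADD), hence the CMPXCHG loop above. In plain kernel C the JITed sequence computes roughly the following; the function name is made up, and cmpxchg() stands in for the emitted LOCK CMPXCHG:

	/* Sketch of the semantics of BPF_OR | BPF_FETCH on a 64-bit operand. */
	static u64 fetch_or64_sketch(u64 *addr, u64 val)
	{
		u64 old, new;

		do {
			old = READ_ONCE(*addr);	/* emit_ldx: load old value    */
			new = old | val;	/* ALU op on the local copy    */
		} while (cmpxchg(addr, old, new) != old);	/* lost the race: retry */

		return old;		/* pre-modification value -> src_reg */
	}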
include/linux/filter.h

@@ -264,7 +264,13 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */
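From a BPF C program these instructions are normally generated by compiler builtins rather than hand-assembled encodings. A hedged example, assuming a clang new enough to emit the new atomic instructions (clang 12+ with -mcpu=v3, to my understanding):

	#include <linux/types.h>

	static __u64 flags_word;

	static void atomics_example(void)
	{
		__u64 old;

		/* Result used: clang emits the fetch form (BPF_AND | BPF_FETCH). */
		old = __sync_fetch_and_and(&flags_word, 0xffULL);

		/* Result discarded: clang may emit the plain BPF_OR form instead. */
		__sync_fetch_and_or(&flags_word, 0x100ULL);

		(void)old;
	}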
kernel/bpf/core.c

@@ -1642,6 +1642,9 @@ out:
	STX_ATOMIC_W:
		switch (IMM) {
		ATOMIC_ALU_OP(BPF_ADD, add)
		ATOMIC_ALU_OP(BPF_AND, and)
		ATOMIC_ALU_OP(BPF_OR, or)
		ATOMIC_ALU_OP(BPF_XOR, xor)
#undef ATOMIC_ALU_OP

		case BPF_XCHG:
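The ATOMIC_ALU_OP() macro used above (its definition sits just before this hunk and is not shown in the excerpt) generates both the plain and the BPF_FETCH case for each operation. A self-contained sketch of the work it does for BPF_OR on a 32-bit (BPF_W) operand, with made-up parameter names standing in for the interpreter's SRC/DST registers:

	#include <linux/atomic.h>
	#include <linux/types.h>

	static void stx_atomic_w_or_sketch(u32 *src, u64 dst, s16 off, bool fetch)
	{
		atomic_t *p = (atomic_t *)(unsigned long)(dst + off);

		if (!fetch)
			atomic_or((int)*src, p);			/* BPF_OR             */
		else
			*src = (u32)atomic_fetch_or((int)*src, p);	/* BPF_OR | BPF_FETCH */
	}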
kernel/bpf/disasm.c

@@ -80,6 +80,13 @@ const char *const bpf_alu_string[16] = {
	[BPF_END >> 4]  = "endian",
};

static const char *const bpf_atomic_alu_string[16] = {
	[BPF_ADD >> 4]  = "add",
	[BPF_AND >> 4]  = "and",
	[BPF_OR >> 4]   = "or",
	[BPF_XOR >> 4]  = "xor",
};

static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3]  = "u32",
	[BPF_H >> 3]  = "u16",
@@ -154,17 +161,23 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
				insn->dst_reg,
				insn->off, insn->src_reg);
		else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
-			 insn->imm == BPF_ADD) {
-			verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
+			 (insn->imm == BPF_ADD || insn->imm == BPF_AND ||
+			  insn->imm == BPF_OR || insn->imm == BPF_XOR)) {
+			verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) %s r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
+				bpf_alu_string[BPF_OP(insn->imm) >> 4],
				insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
-			   insn->imm == (BPF_ADD | BPF_FETCH)) {
-			verbose(cbs->private_data, "(%02x) r%d = atomic%s_fetch_add((%s *)(r%d %+d), r%d)\n",
+			   (insn->imm == (BPF_ADD | BPF_FETCH) ||
+			    insn->imm == (BPF_AND | BPF_FETCH) ||
+			    insn->imm == (BPF_OR | BPF_FETCH) ||
+			    insn->imm == (BPF_XOR | BPF_FETCH))) {
+			verbose(cbs->private_data, "(%02x) r%d = atomic%s_fetch_%s((%s *)(r%d %+d), r%d)\n",
				insn->code, insn->src_reg,
				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
+				bpf_atomic_alu_string[BPF_OP(insn->imm) >> 4],
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off, insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
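With these changes the disassembler prints the operation name instead of a hard-coded add. A hypothetical output line for a 64-bit fetch-or, with made-up registers and offset (0xdb = BPF_STX | BPF_ATOMIC | BPF_DW):

	(db) r1 = atomic64_fetch_or((u64 *)(r2 +0), r1)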
kernel/bpf/verifier.c

@@ -3612,6 +3612,12 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
	switch (insn->imm) {
	case BPF_ADD:
	case BPF_ADD | BPF_FETCH:
	case BPF_AND:
	case BPF_AND | BPF_FETCH:
	case BPF_OR:
	case BPF_OR | BPF_FETCH:
	case BPF_XOR:
	case BPF_XOR | BPF_FETCH:
	case BPF_XCHG:
	case BPF_CMPXCHG:
		break;
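Any imm value outside this list is rejected; the default branch of the switch (not shown in the excerpt) reports the bad opcode and fails verification, roughly along these lines (assuming the pre-existing error path is unchanged):

	/* ...continues the switch shown above */
	default:
		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
		return -EINVAL;
	}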
tools/include/linux/filter.h

@@ -173,7 +173,13 @@
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */