Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says: ==================== pull-request: bpf 2019-02-16 The following pull-request contains BPF updates for your *net* tree. The main changes are: 1) fix lockdep false positive in bpf_get_stackid(), from Alexei. 2) several AF_XDP fixes, from Bjorn, Magnus, Davidlohr. 3) fix narrow load from struct bpf_sock, from Martin. 4) mips JIT fixes, from Paul. 5) gso handling fix in bpf helpers, from Willem. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
6e1077f514
@ -79,8 +79,6 @@ enum reg_val_type {
|
||||
REG_64BIT_32BIT,
|
||||
/* 32-bit compatible, need truncation for 64-bit ops. */
|
||||
REG_32BIT,
|
||||
/* 32-bit zero extended. */
|
||||
REG_32BIT_ZERO_EX,
|
||||
/* 32-bit no sign/zero extension needed. */
|
||||
REG_32BIT_POS
|
||||
};
|
||||
@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
|
||||
const struct bpf_prog *prog = ctx->skf;
|
||||
int stack_adjust = ctx->stack_size;
|
||||
int store_offset = stack_adjust - 8;
|
||||
enum reg_val_type td;
|
||||
int r0 = MIPS_R_V0;
|
||||
|
||||
if (dest_reg == MIPS_R_RA &&
|
||||
get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
|
||||
if (dest_reg == MIPS_R_RA) {
|
||||
/* Don't let zero extended value escape. */
|
||||
emit_instr(ctx, sll, r0, r0, 0);
|
||||
td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
|
||||
if (td == REG_64BIT)
|
||||
emit_instr(ctx, sll, r0, r0, 0);
|
||||
}
|
||||
|
||||
if (ctx->flags & EBPF_SAVE_RA) {
|
||||
emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
|
||||
@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
if (dst < 0)
|
||||
return dst;
|
||||
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
|
||||
if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
|
||||
if (td == REG_64BIT) {
|
||||
/* sign extend */
|
||||
emit_instr(ctx, sll, dst, dst, 0);
|
||||
}
|
||||
@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
if (dst < 0)
|
||||
return dst;
|
||||
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
|
||||
if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
|
||||
if (td == REG_64BIT) {
|
||||
/* sign extend */
|
||||
emit_instr(ctx, sll, dst, dst, 0);
|
||||
}
|
||||
@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
if (dst < 0)
|
||||
return dst;
|
||||
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
|
||||
if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
|
||||
if (td == REG_64BIT)
|
||||
/* sign extend */
|
||||
emit_instr(ctx, sll, dst, dst, 0);
|
||||
if (insn->imm == 1) {
|
||||
@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
if (src < 0 || dst < 0)
|
||||
return -EINVAL;
|
||||
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
|
||||
if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
|
||||
if (td == REG_64BIT) {
|
||||
/* sign extend */
|
||||
emit_instr(ctx, sll, dst, dst, 0);
|
||||
}
|
||||
did_move = false;
|
||||
ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
|
||||
if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
|
||||
if (ts == REG_64BIT) {
|
||||
int tmp_reg = MIPS_R_AT;
|
||||
|
||||
if (bpf_op == BPF_MOV) {
|
||||
@ -1254,8 +1255,7 @@ jeq_common:
|
||||
if (insn->imm == 64 && td == REG_32BIT)
|
||||
emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
|
||||
|
||||
if (insn->imm != 64 &&
|
||||
(td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
|
||||
if (insn->imm != 64 && td == REG_64BIT) {
|
||||
/* sign extend */
|
||||
emit_instr(ctx, sll, dst, dst, 0);
|
||||
}
|
||||
|
@ -4212,6 +4212,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
|
||||
return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
|
||||
}
|
||||
|
||||
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
|
||||
{
|
||||
return skb_is_gso(skb) &&
|
||||
skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
|
||||
}
|
||||
|
||||
static inline void skb_gso_reset(struct sk_buff *skb)
|
||||
{
|
||||
skb_shinfo(skb)->gso_size = 0;
|
||||
|
@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
|
||||
struct stack_map_irq_work *work;
|
||||
|
||||
work = container_of(entry, struct stack_map_irq_work, irq_work);
|
||||
up_read(work->sem);
|
||||
up_read_non_owner(work->sem);
|
||||
work->sem = NULL;
|
||||
}
|
||||
|
||||
@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
|
||||
} else {
|
||||
work->sem = &current->mm->mmap_sem;
|
||||
irq_work_queue(&work->irq_work);
|
||||
/*
|
||||
* The irq_work will release the mmap_sem with
|
||||
* up_read_non_owner(). The rwsem_release() is called
|
||||
* here to release the lock from lockdep's perspective.
|
||||
*/
|
||||
rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
|
||||
int size, enum bpf_access_type t)
|
||||
static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
|
||||
u32 regno, int off, int size,
|
||||
enum bpf_access_type t)
|
||||
{
|
||||
struct bpf_reg_state *regs = cur_regs(env);
|
||||
struct bpf_reg_state *reg = &regs[regno];
|
||||
struct bpf_insn_access_aux info;
|
||||
struct bpf_insn_access_aux info = {};
|
||||
|
||||
if (reg->smin_value < 0) {
|
||||
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
|
||||
@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
||||
verbose(env, "cannot write into socket\n");
|
||||
return -EACCES;
|
||||
}
|
||||
err = check_sock_access(env, regno, off, size, t);
|
||||
err = check_sock_access(env, insn_idx, regno, off, size, t);
|
||||
if (!err && value_regno >= 0)
|
||||
mark_reg_unknown(env, regs, value_regno);
|
||||
} else {
|
||||
|
@ -2789,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
|
||||
u32 off = skb_mac_header_len(skb);
|
||||
int ret;
|
||||
|
||||
/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
|
||||
if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
|
||||
if (!skb_is_gso_tcp(skb))
|
||||
return -ENOTSUPP;
|
||||
|
||||
ret = skb_cow(skb, len_diff);
|
||||
@ -2831,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
|
||||
u32 off = skb_mac_header_len(skb);
|
||||
int ret;
|
||||
|
||||
/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
|
||||
if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
|
||||
if (!skb_is_gso_tcp(skb))
|
||||
return -ENOTSUPP;
|
||||
|
||||
ret = skb_unclone(skb, GFP_ATOMIC);
|
||||
@ -2957,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
|
||||
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
|
||||
int ret;
|
||||
|
||||
/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
|
||||
if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
|
||||
if (!skb_is_gso_tcp(skb))
|
||||
return -ENOTSUPP;
|
||||
|
||||
ret = skb_cow(skb, len_diff);
|
||||
@ -2987,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
|
||||
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
|
||||
int ret;
|
||||
|
||||
/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
|
||||
if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
|
||||
if (!skb_is_gso_tcp(skb))
|
||||
return -ENOTSUPP;
|
||||
|
||||
ret = skb_unclone(skb, GFP_ATOMIC);
|
||||
|
@ -125,9 +125,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
|
||||
return 0;
|
||||
|
||||
err_unreg_umem:
|
||||
xdp_clear_umem_at_qid(dev, queue_id);
|
||||
if (!force_zc)
|
||||
err = 0; /* fallback to copy mode */
|
||||
if (err)
|
||||
xdp_clear_umem_at_qid(dev, queue_id);
|
||||
out_rtnl_unlock:
|
||||
rtnl_unlock();
|
||||
return err;
|
||||
@ -259,10 +260,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
|
||||
if (!umem->pgs)
|
||||
return -ENOMEM;
|
||||
|
||||
down_write(&current->mm->mmap_sem);
|
||||
npgs = get_user_pages(umem->address, umem->npgs,
|
||||
gup_flags, &umem->pgs[0], NULL);
|
||||
up_write(&current->mm->mmap_sem);
|
||||
down_read(&current->mm->mmap_sem);
|
||||
npgs = get_user_pages_longterm(umem->address, umem->npgs,
|
||||
gup_flags, &umem->pgs[0], NULL);
|
||||
up_read(&current->mm->mmap_sem);
|
||||
|
||||
if (npgs != umem->npgs) {
|
||||
if (npgs >= 0) {
|
||||
|
@ -669,6 +669,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
|
||||
if (!umem)
|
||||
return -EINVAL;
|
||||
|
||||
/* Matches the smp_wmb() in XDP_UMEM_REG */
|
||||
smp_rmb();
|
||||
if (offset == XDP_UMEM_PGOFF_FILL_RING)
|
||||
q = READ_ONCE(umem->fq);
|
||||
else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
|
||||
@ -678,6 +680,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
|
||||
if (!q)
|
||||
return -EINVAL;
|
||||
|
||||
/* Matches the smp_wmb() in xsk_init_queue */
|
||||
smp_rmb();
|
||||
qpg = virt_to_head_page(q->ring);
|
||||
if (size > (PAGE_SIZE << compound_order(qpg)))
|
||||
return -EINVAL;
|
||||
|
Loading…
Reference in New Issue
Block a user