bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB
BPF programs of BPF_PROG_TYPE_CGROUP_SKB need to access headers in the skb. This patch enables direct access to the skb data for these programs.

Two helper functions, bpf_compute_and_save_data_end() and bpf_restore_data_end(), are introduced. They are used in __cgroup_bpf_run_filter_skb() to compute the proper data_end for the BPF program, and to restore the original data_end afterwards.

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit b39b5f411d
parent 2929ad29a3
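For illustration only (not part of this patch): once cg_skb_is_valid_access() maps __sk_buff->data and __sk_buff->data_end to PTR_TO_PACKET and PTR_TO_PACKET_END, a BPF_PROG_TYPE_CGROUP_SKB program can read packet headers directly instead of calling bpf_skb_load_bytes(). Below is a minimal sketch of such a program; the section name, program name, and the drop-ICMP policy are invented for the example.

#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/ingress")
int cg_skb_direct_access(struct __sk_buff *skb)
{
        void *data     = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;
        struct iphdr *iph = data;

        /* The verifier still requires an explicit bounds check against
         * data_end before any direct read of packet bytes.
         */
        if ((void *)(iph + 1) > data_end)
                return 1;               /* header out of bounds: allow */

        /* Example policy: drop ICMP in this cgroup, allow everything else
         * (for cgroup skb programs, return 1 = allow, 0 = drop).
         */
        if (iph->version == 4 && iph->protocol == IPPROTO_ICMP)
                return 0;

        return 1;
}

char _license[] SEC("license") = "GPL";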
include/linux/filter.h

@@ -548,6 +548,27 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
 	cb->data_end = skb->data + skb_headlen(skb);
 }
 
+/* Similar to bpf_compute_data_pointers(), except that it saves the
+ * original data_end in *saved_data_end so it can be restored later.
+ */
+static inline void bpf_compute_and_save_data_end(
+	struct sk_buff *skb, void **saved_data_end)
+{
+	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+	*saved_data_end = cb->data_end;
+	cb->data_end = skb->data + skb_headlen(skb);
+}
+
+/* Restore the data_end saved by bpf_compute_and_save_data_end(). */
+static inline void bpf_restore_data_end(
+	struct sk_buff *skb, void *saved_data_end)
+{
+	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+	cb->data_end = saved_data_end;
+}
+
 static inline u8 *bpf_skb_cb(struct sk_buff *skb)
 {
 	/* eBPF programs may read/write skb->cb[] area to transfer meta
kernel/bpf/cgroup.c

@@ -553,6 +553,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 {
 	unsigned int offset = skb->data - skb_network_header(skb);
 	struct sock *save_sk;
+	void *saved_data_end;
 	struct cgroup *cgrp;
 	int ret;
 
@@ -566,8 +567,13 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 	save_sk = skb->sk;
 	skb->sk = sk;
 	__skb_push(skb, offset);
+
+	/* compute pointers for the bpf prog */
+	bpf_compute_and_save_data_end(skb, &saved_data_end);
+
 	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
 				 bpf_prog_run_save_cb);
+	bpf_restore_data_end(skb, saved_data_end);
 	__skb_pull(skb, offset);
 	skb->sk = save_sk;
 	return ret == 1 ? 0 : -EPERM;
net/core/filter.c

@@ -5352,6 +5352,40 @@ static bool sk_filter_is_valid_access(int off, int size,
 	return bpf_skb_is_valid_access(off, size, type, prog, info);
 }
 
+static bool cg_skb_is_valid_access(int off, int size,
+				   enum bpf_access_type type,
+				   const struct bpf_prog *prog,
+				   struct bpf_insn_access_aux *info)
+{
+	switch (off) {
+	case bpf_ctx_range(struct __sk_buff, tc_classid):
+	case bpf_ctx_range(struct __sk_buff, data_meta):
+	case bpf_ctx_range(struct __sk_buff, flow_keys):
+		return false;
+	}
+	if (type == BPF_WRITE) {
+		switch (off) {
+		case bpf_ctx_range(struct __sk_buff, mark):
+		case bpf_ctx_range(struct __sk_buff, priority):
+		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
+			break;
+		default:
+			return false;
+		}
+	}
+
+	switch (off) {
+	case bpf_ctx_range(struct __sk_buff, data):
+		info->reg_type = PTR_TO_PACKET;
+		break;
+	case bpf_ctx_range(struct __sk_buff, data_end):
+		info->reg_type = PTR_TO_PACKET_END;
+		break;
+	}
+
+	return bpf_skb_is_valid_access(off, size, type, prog, info);
+}
+
 static bool lwt_is_valid_access(int off, int size,
 				enum bpf_access_type type,
 				const struct bpf_prog *prog,
@@ -7044,7 +7078,7 @@ const struct bpf_prog_ops xdp_prog_ops = {
 
 const struct bpf_verifier_ops cg_skb_verifier_ops = {
 	.get_func_proto		= cg_skb_func_proto,
-	.is_valid_access	= sk_filter_is_valid_access,
+	.is_valid_access	= cg_skb_is_valid_access,
 	.convert_ctx_access	= bpf_convert_ctx_access,
 };
 
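Also for illustration (not part of this patch): a user-space sketch of how such a program might be attached with libbpf, assuming the object file from the example above and a cgroup at /sys/fs/cgroup/test (both names are assumptions). Once attached, __cgroup_bpf_run_filter_skb() runs the program for packets received by sockets in that cgroup.

#include <fcntl.h>
#include <stdio.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
        struct bpf_object *obj;
        struct bpf_program *prog;
        int cgroup_fd;

        /* Open and load the compiled BPF object (name assumed). */
        obj = bpf_object__open_file("cg_skb_direct_access.bpf.o", NULL);
        if (!obj || bpf_object__load(obj))
                return 1;

        prog = bpf_object__find_program_by_name(obj, "cg_skb_direct_access");
        if (!prog)
                return 1;

        cgroup_fd = open("/sys/fs/cgroup/test", O_RDONLY);
        if (cgroup_fd < 0)
                return 1;

        /* Attach on the ingress hook of the cgroup. */
        if (bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
                            BPF_CGROUP_INET_INGRESS, 0))
                return 1;

        printf("attached\n");
        return 0;
}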