bpf: use bpf_prog_pack for bpf_dispatcher
Allocate bpf_dispatcher with bpf_prog_pack_alloc so that bpf_dispatcher
can share pages with bpf programs. arch_prepare_bpf_dispatcher() is
updated to provide a RW buffer as working area for arch code to write to.

This also fixes CPA W^X warning like:

CPA refuse W^X violation: 8000000000000163 -> 0000000000000163 range: ...

Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20220926184739.3512547-2-song@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 19c02415da
parent 30b8fdbbe3
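In short, the dispatcher image now comes from the shared prog pack (RO+X), the code is generated into a separate RW buffer (d->rw_image), and the finished bytes are installed with bpf_arch_text_copy(). The userspace sketch below only models that two-mapping flow; prepare_code(), rw_buf and the mmap/memcpy/mprotect calls are stand-ins for illustration, not the kernel's bpf_prog_pack_alloc()/bpf_arch_text_copy() implementation.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Stand-in for arch_prepare_bpf_dispatcher(image, buf, ...): it writes only
 * into the RW buffer; 'image' is just the address the code will run at
 * (a real emitter uses it to compute jump targets). */
static size_t prepare_code(unsigned char *image, unsigned char *buf)
{
	(void)image;
	buf[0] = 0xc3;		/* x86 'ret' */
	return 1;
}

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	unsigned char *image, *rw_buf;
	size_t len;

	/* Destination mapping; mapped RW here only so this model can memcpy
	 * into it before flipping it to R+X. The kernel instead keeps the
	 * prog-pack page RO+X and writes through bpf_arch_text_copy(). */
	image = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* Separate RW working area (d->rw_image in the patch). */
	rw_buf = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (image == MAP_FAILED || rw_buf == MAP_FAILED)
		return 1;

	len = prepare_code(image, rw_buf);	/* generate into the RW buffer */
	memcpy(image, rw_buf, len);		/* install ("text copy") into the image */
	mprotect(image, psz, PROT_READ | PROT_EXEC);	/* image is now R+X, never W+X */

	printf("installed %zu byte(s) at %p\n", len, (void *)image);
	return 0;
}

Note how the new error path in bpf_dispatcher_change_prog() below mirrors the cleanup side of this: if the RW page cannot be allocated, the pack reservation is released again via bpf_prog_pack_free().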
arch/x86/net/bpf_jit_comp.c

@@ -2242,7 +2242,7 @@ cleanup:
 	return ret;
 }
 
-static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
+static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
 {
 	u8 *jg_reloc, *prog = *pprog;
 	int pivot, err, jg_bytes = 1;
@@ -2258,12 +2258,12 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
 		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
 			    progs[a]);
 		err = emit_cond_near_jump(&prog,	/* je func */
-					  (void *)progs[a], prog,
+					  (void *)progs[a], image + (prog - buf),
 					  X86_JE);
 		if (err)
 			return err;
 
-		emit_indirect_jump(&prog, 2 /* rdx */, prog);
+		emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
 
 		*pprog = prog;
 		return 0;
@@ -2288,7 +2288,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
 	jg_reloc = prog;
 
 	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
-				  progs);
+				  progs, image, buf);
 	if (err)
 		return err;
 
@@ -2302,7 +2302,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
 
 	err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
-				  b, progs);
+				  b, progs, image, buf);
 	if (err)
 		return err;
 
@@ -2322,12 +2322,12 @@ static int cmp_ips(const void *a, const void *b)
 	return 0;
 }
 
-int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
+int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
 {
-	u8 *prog = image;
+	u8 *prog = buf;
 
 	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
-	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
+	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
 }
 
 struct x64_jit_data {
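A note on the image/buf split in the x86 JIT above: prog walks the RW working buffer, but emit_cond_near_jump() and emit_indirect_jump() need the address the bytes will finally execute at, hence image + (prog - buf), and arch_prepare_bpf_dispatcher() now starts emitting at prog = buf while still receiving image. The sketch below is a hedged, self-contained illustration of that displacement math for a plain jmp rel32; emit_jmp_rel32() and the 0x100000 image address are made up for the example and are not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Emit a 5-byte 'jmp rel32' into the RW buffer at *pprog. The displacement
 * must be computed against final_ip, the address these bytes will occupy in
 * the executable image, not against their temporary address in the buffer. */
static void emit_jmp_rel32(uint8_t **pprog, uintptr_t target, uintptr_t final_ip)
{
	uint8_t *prog = *pprog;
	int32_t rel = (int32_t)(target - (final_ip + 5));	/* rel32 counts from the end of the insn */

	*prog++ = 0xe9;				/* opcode: jmp rel32 */
	memcpy(prog, &rel, sizeof(rel));
	prog += sizeof(rel);
	*pprog = prog;
}

int main(void)
{
	uint8_t buf[32] = {};			/* RW working area ("buf") */
	uintptr_t image = 0x100000;		/* pretend final RO+X address ("image") */
	uint8_t *prog = buf;
	int32_t rel;

	/* Jump to a target that will live at image + 0x40. Passing
	 * image + (prog - buf) instead of 'prog' is what keeps the
	 * displacement correct once the buffer is copied into the image. */
	emit_jmp_rel32(&prog, image + 0x40, image + (uintptr_t)(prog - buf));

	memcpy(&rel, buf + 1, sizeof(rel));
	printf("encoded rel32 = 0x%x\n", rel);	/* prints 0x3b = 0x40 - 5 */
	return 0;
}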
include/linux/bpf.h

@@ -946,6 +946,7 @@ struct bpf_dispatcher {
 	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
 	int num_progs;
 	void *image;
+	void *rw_image;
 	u32 image_off;
 	struct bpf_ksym ksym;
 };
@@ -964,7 +965,7 @@ int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampolin
 struct bpf_trampoline *bpf_trampoline_get(u64 key,
 					  struct bpf_attach_target_info *tgt_info);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
-int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
+int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
 #define BPF_DISPATCHER_INIT(_name) {				\
 	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
 	.func = &_name##_func,					\
include/linux/filter.h

@@ -1023,6 +1023,8 @@ extern long bpf_jit_limit_max;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
+void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     unsigned int alignment,
@@ -1035,6 +1037,9 @@ void bpf_jit_free(struct bpf_prog *fp);
 struct bpf_binary_header *
 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
 
+void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
+void bpf_prog_pack_free(struct bpf_binary_header *hdr);
+
 static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 {
 	return list_empty(&fp->aux->ksym.lnode) ||
kernel/bpf/core.c

@@ -825,6 +825,11 @@ struct bpf_prog_pack {
 	unsigned long bitmap[];
 };
 
+void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
+{
+	memset(area, 0, size);
+}
+
 #define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
 
 static DEFINE_MUTEX(pack_mutex);
@@ -864,7 +869,7 @@ static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_ins
 	return pack;
 }
 
-static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
+void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
 {
 	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
 	struct bpf_prog_pack *pack;
@@ -905,7 +910,7 @@ out:
 	return ptr;
 }
 
-static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
+void bpf_prog_pack_free(struct bpf_binary_header *hdr)
 {
 	struct bpf_prog_pack *pack = NULL, *tmp;
 	unsigned int nbits;
kernel/bpf/dispatcher.c

@@ -85,12 +85,12 @@ static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher *d,
 	return false;
 }
 
-int __weak arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
+int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
 {
 	return -ENOTSUPP;
 }
 
-static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image)
+static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf)
 {
 	s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];
 	int i;
@@ -99,12 +99,12 @@ static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image)
 		if (d->progs[i].prog)
 			*ipsp++ = (s64)(uintptr_t)d->progs[i].prog->bpf_func;
 	}
-	return arch_prepare_bpf_dispatcher(image, &ips[0], d->num_progs);
+	return arch_prepare_bpf_dispatcher(image, buf, &ips[0], d->num_progs);
 }
 
 static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
 {
-	void *old, *new;
+	void *old, *new, *tmp;
 	u32 noff;
 	int err;
 
@@ -117,8 +117,14 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
 	}
 
 	new = d->num_progs ? d->image + noff : NULL;
+	tmp = d->num_progs ? d->rw_image + noff : NULL;
 	if (new) {
-		if (bpf_dispatcher_prepare(d, new))
+		/* Prepare the dispatcher in d->rw_image. Then use
+		 * bpf_arch_text_copy to update d->image, which is RO+X.
+		 */
+		if (bpf_dispatcher_prepare(d, new, tmp))
+			return;
+		if (IS_ERR(bpf_arch_text_copy(new, tmp, PAGE_SIZE / 2)))
 			return;
 	}
 
@@ -140,9 +146,18 @@ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
 
 	mutex_lock(&d->mutex);
 	if (!d->image) {
-		d->image = bpf_jit_alloc_exec_page();
+		d->image = bpf_prog_pack_alloc(PAGE_SIZE, bpf_jit_fill_hole_with_zero);
 		if (!d->image)
 			goto out;
+		d->rw_image = bpf_jit_alloc_exec(PAGE_SIZE);
+		if (!d->rw_image) {
+			u32 size = PAGE_SIZE;
+
+			bpf_arch_text_copy(d->image, &size, sizeof(size));
+			bpf_prog_pack_free((struct bpf_binary_header *)d->image);
+			d->image = NULL;
+			goto out;
+		}
 		bpf_image_ksym_add(d->image, &d->ksym);
 	}
 