bpf: rename poke descriptor's 'ip' member to 'tailcall_target'
Reflect the actual purpose of poke->ip and rename it to poke->tailcall_target so that it will not be confused with another poke target that will be introduced in the next commit. While at it, do the same thing with poke->ip_stable - rename it to poke->tailcall_target_stable.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent a748c6975d
commit cf71b174d3
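For readers skimming the diff below, the net effect on the descriptor is a pure rename of two members; the following is an abridged sketch (other members and the tail_call union are omitted here, see the include/linux/bpf.h hunk for the full definition):

```c
/* Abridged sketch of struct bpf_jit_poke_descriptor after this patch. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;		/* was poke->ip: address in the JITed image
					 * that gets patched for the tail call jump
					 */
	/* ... tail_call union, adj_off, reason, insn_idx ... */
	bool tailcall_target_stable;	/* was poke->ip_stable: set once the JIT has
					 * finished its final fixup of the location
					 */
};
```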
@@ -434,7 +434,7 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
 	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
 
-	poke->ip = image + (addr - X86_PATCH_SIZE);
+	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
 	poke->adj_off = PROLOGUE_SIZE;
 
 	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
@@ -453,7 +453,7 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
 		poke = &prog->aux->poke_tab[i];
-		WARN_ON_ONCE(READ_ONCE(poke->ip_stable));
+		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
 
 		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
 			continue;
@@ -464,18 +464,20 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
 		if (target) {
 			/* Plain memcpy is used when image is not live yet
 			 * and still not locked as read-only. Once poke
-			 * location is active (poke->ip_stable), any parallel
-			 * bpf_arch_text_poke() might occur still on the
-			 * read-write image until we finally locked it as
-			 * read-only. Both modifications on the given image
-			 * are under text_mutex to avoid interference.
+			 * location is active (poke->tailcall_target_stable),
+			 * any parallel bpf_arch_text_poke() might occur
+			 * still on the read-write image until we finally
+			 * locked it as read-only. Both modifications on
+			 * the given image are under text_mutex to avoid
+			 * interference.
 			 */
-			ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
+			ret = __bpf_arch_text_poke(poke->tailcall_target,
+						   BPF_MOD_JUMP, NULL,
 						   (u8 *)target->bpf_func +
 						   poke->adj_off, false);
 			BUG_ON(ret < 0);
 		}
-		WRITE_ONCE(poke->ip_stable, true);
+		WRITE_ONCE(poke->tailcall_target_stable, true);
 		mutex_unlock(&array->aux->poke_mutex);
 	}
 }
@@ -697,14 +697,14 @@ enum bpf_jit_poke_reason {
 
 /* Descriptor of pokes pointing /into/ the JITed image. */
 struct bpf_jit_poke_descriptor {
-	void *ip;
+	void *tailcall_target;
 	union {
 		struct {
 			struct bpf_map *map;
 			u32 key;
 		} tail_call;
 	};
-	bool ip_stable;
+	bool tailcall_target_stable;
 	u8 adj_off;
 	u16 reason;
 	u32 insn_idx;
@@ -918,12 +918,13 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 		 *    there could be danger of use after free otherwise.
 		 * 2) Initially when we start tracking aux, the program
 		 *    is not JITed yet and also does not have a kallsyms
-		 *    entry. We skip these as poke->ip_stable is not
-		 *    active yet. The JIT will do the final fixup before
-		 *    setting it stable. The various poke->ip_stable are
-		 *    successively activated, so tail call updates can
-		 *    arrive from here while JIT is still finishing its
-		 *    final fixup for non-activated poke entries.
+		 *    entry. We skip these as poke->tailcall_target_stable
+		 *    is not active yet. The JIT will do the final fixup
+		 *    before setting it stable. The various
+		 *    poke->tailcall_target_stable are successively
+		 *    activated, so tail call updates can arrive from here
+		 *    while JIT is still finishing its final fixup for
+		 *    non-activated poke entries.
 		 * 3) On program teardown, the program's kallsym entry gets
 		 *    removed out of RCU callback, but we can only untrack
 		 *    from sleepable context, therefore bpf_arch_text_poke()
@@ -940,7 +941,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 		 * 5) Any other error happening below from bpf_arch_text_poke()
 		 *    is a unexpected bug.
 		 */
-		if (!READ_ONCE(poke->ip_stable))
+		if (!READ_ONCE(poke->tailcall_target_stable))
 			continue;
 		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
 			continue;
@@ -948,7 +949,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 		    poke->tail_call.key != key)
 			continue;
 
-		ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
+		ret = bpf_arch_text_poke(poke->tailcall_target, BPF_MOD_JUMP,
 					 old ? (u8 *)old->bpf_func +
 					       poke->adj_off : NULL,
 					 new ? (u8 *)new->bpf_func +
@@ -775,7 +775,8 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
 
 	if (size > poke_tab_max)
 		return -ENOSPC;
-	if (poke->ip || poke->ip_stable || poke->adj_off)
+	if (poke->tailcall_target || poke->tailcall_target_stable ||
+	    poke->adj_off)
 		return -EINVAL;
 
 	switch (poke->reason) {