// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021. Huawei Technologies Co., Ltd
 */
#include <linux/kernel.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>

extern struct bpf_struct_ops bpf_bpf_dummy_ops;

/* A common type for test_N with a return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);

struct bpf_dummy_ops_test_args {
	u64 args[MAX_BPF_FUNC_ARGS];
	struct bpf_dummy_ops_state state;
};

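/*
 * Copy the BPF_PROG_TEST_RUN context into kernel memory. ctx_in must hold
 * exactly @nr u64 arguments. When args[0] is non-zero it is interpreted as
 * a user pointer to a struct bpf_dummy_ops_state, which is copied in
 * alongside the scalar arguments.
 */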
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
	__u32 size_in;
	struct bpf_dummy_ops_test_args *args;
	void __user *ctx_in;
	void __user *u_state;

	size_in = kattr->test.ctx_size_in;
	if (size_in != sizeof(u64) * nr)
		return ERR_PTR(-EINVAL);

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return ERR_PTR(-ENOMEM);

	ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	if (copy_from_user(args->args, ctx_in, size_in))
		goto out;

	/* args[0] == 0 means the state argument of test_N will be NULL */
	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_from_user(&args->state, u_state,
				      sizeof(args->state)))
		goto out;

	return args;
out:
	kfree(args);
	return ERR_PTR(-EFAULT);
}

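/*
 * Copy the (possibly modified) state back out to the user buffer that
 * args[0] points to, so the caller can observe writes made by the prog.
 */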
static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
{
	void __user *u_state;

	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
		return -EFAULT;

	return 0;
}

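/*
 * Call the generated trampoline directly. The image entry point follows
 * the dummy_ops_test_ret_fn convention: a state pointer (NULL when
 * args[0] is 0) followed by the remaining scalar arguments.
 */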
static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
{
	dummy_ops_test_ret_fn test = (void *)image;
	struct bpf_dummy_ops_state *state = NULL;

	/* state needs to be NULL if args[0] is 0 */
	if (args->args[0])
		state = &args->state;
	return test(state, args->args[1], args->args[2],
		    args->args[3], args->args[4]);
}

extern const struct bpf_link_ops bpf_struct_ops_link_lops;

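/*
 * Entry point for BPF_PROG_TEST_RUN on a bpf_dummy_ops program: copy in
 * the arguments, build a one-page trampoline for the targeted op, call
 * it, then copy the state and the op's return value back to userspace.
 */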
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr)
{
	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
	const struct btf_type *func_proto;
	struct bpf_dummy_ops_test_args *args;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link *link = NULL;
	void *image = NULL;
	unsigned int op_idx;
	int prog_ret;
	int err;

	if (prog->aux->attach_btf_id != st_ops->type_id)
		return -EOPNOTSUPP;

	func_proto = prog->aux->attach_func_proto;
	args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
	if (IS_ERR(args))
		return PTR_ERR(args);

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks) {
		err = -ENOMEM;
		goto out;
	}

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image) {
		err = -ENOMEM;
		goto out;
	}
	set_vm_flush_reset_perms(image);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out;
	}
	/* prog does not take ownership of the caller's reference */
	bpf_prog_inc(prog);
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog);

	op_idx = prog->expected_attach_type;
	err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[op_idx],
						image, image + PAGE_SIZE);
	if (err < 0)
		goto out;

	set_memory_rox((long)image, 1);
	prog_ret = dummy_ops_call_op(image, args);

	err = dummy_ops_copy_args(args);
	if (err)
		goto out;
	if (put_user(prog_ret, &uattr->test.retval))
		err = -EFAULT;
out:
	kfree(args);
	bpf_jit_free_exec(image);
	if (link)
		bpf_link_put(&link->link);
	kfree(tlinks);
	return err;
}

static int bpf_dummy_init(struct btf *btf)
{
	return 0;
}

static bool bpf_dummy_ops_is_valid_access(int off, int size,
					  enum bpf_access_type type,
					  const struct bpf_prog *prog,
					  struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

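/*
 * Restrict BTF pointer walks: the only struct a dummy_ops prog may
 * dereference is bpf_dummy_ops_state; any other struct access is
 * rejected with -EACCES before delegating the offset/size checks to
 * btf_struct_access().
 */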
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
					   const struct bpf_reg_state *reg,
					   int off, int size, enum bpf_access_type atype,
					   u32 *next_btf_id,
					   enum bpf_type_flag *flag)
{
	const struct btf_type *state;
	const struct btf_type *t;
	s32 type_id;
	int err;

	type_id = btf_find_by_name_kind(reg->btf, "bpf_dummy_ops_state",
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	state = btf_type_by_id(reg->btf, type_id);
	if (t != state) {
		bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
		return -EACCES;
	}

	err = btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
	if (err < 0)
		return err;

	return atype == BPF_READ ? err : NOT_INIT;
}

static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
	.is_valid_access = bpf_dummy_ops_is_valid_access,
	.btf_struct_access = bpf_dummy_ops_btf_struct_access,
};

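/*
 * bpf_dummy_ops exists only for testing, so normal struct_ops use is
 * refused: members cannot be initialized and reg() always fails, which
 * leaves BPF_PROG_TEST_RUN as the only way to exercise these progs.
 */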
static int bpf_dummy_init_member(const struct btf_type *t,
				 const struct btf_member *member,
				 void *kdata, const void *udata)
{
	return -EOPNOTSUPP;
}

static int bpf_dummy_reg(void *kdata)
{
	return -EOPNOTSUPP;
}

static void bpf_dummy_unreg(void *kdata)
{
}

struct bpf_struct_ops bpf_bpf_dummy_ops = {
	.verifier_ops = &bpf_dummy_verifier_ops,
	.init = bpf_dummy_init,
	.init_member = bpf_dummy_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.name = "bpf_dummy_ops",
};
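
/*
 * Hypothetical userspace usage sketch (not part of this file; the libbpf
 * calls are the stock bpf_prog_test_run_opts() API, everything else is
 * illustrative). It mirrors the contract enforced above: ctx_size_in must
 * be exactly 8 bytes per op argument, and args[0] may carry a pointer to
 * a writable bpf_dummy_ops_state:
 *
 *	struct bpf_dummy_ops_state state = {};
 *	__u64 args[2] = { (unsigned long)&state, 42 };
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .ctx_in = args,
 *		    .ctx_size_in = sizeof(args));
 *
 *	if (!bpf_prog_test_run_opts(prog_fd, &opts))
 *		printf("retval=%d state.val=%d\n", opts.retval, state.val);
 */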