perf bpf: Clone bpf stdout events in multiple bpf scripts
This patch allows cloning the bpf-output event configuration among multiple BPF scripts. If a map named '__bpf_stdout__' exists but has not been configured through 'map:__bpf_stdout__.event=', this patch clones the configuration of another '__bpf_stdout__' map that has been. For example, the following command:

  # perf trace --ev bpf-output/no-inherit,name=evt/ \
               --ev ./test_bpf_trace.c/map:__bpf_stdout__.event=evt/ \
               --ev ./test_bpf_trace2.c usleep 100000

is equivalent to:

  # perf trace --ev bpf-output/no-inherit,name=evt/ \
               --ev ./test_bpf_trace.c/map:__bpf_stdout__.event=evt/ \
               --ev ./test_bpf_trace2.c/map:__bpf_stdout__.event=evt/ \
               usleep 100000

Signed-off-by: Wang Nan <wangnan0@huawei.com>
Suggested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1460128045-97310-4-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
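For context, each of the scripts above (test_bpf_trace.c, test_bpf_trace2.c) only has to declare a '__bpf_stdout__' perf-event-array map and write to it; the 'map:__bpf_stdout__.event=' setting, or the cloning added by this patch, decides which bpf-output event that map is wired to. Below is a minimal, hypothetical sketch of such a script, written in the self-contained style of perf's tests/bpf-script-example.c; the probed function, message, map size and version constant are illustrative assumptions, not part of this commit:

	/* Hypothetical test_bpf_trace.c sketch; not part of this commit. */
	#define SEC(NAME) __attribute__((section(NAME), used))

	struct bpf_map_def {
		unsigned int type;
		unsigned int key_size;
		unsigned int value_size;
		unsigned int max_entries;
	};

	/* constants mirror uapi/linux/bpf.h of that era */
	#define BPF_MAP_TYPE_PERF_EVENT_ARRAY	4
	#define BPF_FUNC_get_smp_processor_id	8
	#define BPF_FUNC_perf_event_output	25

	static int (*get_smp_processor_id)(void) =
		(void *)BPF_FUNC_get_smp_processor_id;
	static int (*perf_event_output)(void *ctx, struct bpf_map_def *map,
					int index, void *data, int size) =
		(void *)BPF_FUNC_perf_event_output;

	/* the map perf looks for and attaches a bpf-output event to */
	struct bpf_map_def SEC("maps") __bpf_stdout__ = {
		.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,
		.key_size	= sizeof(int),
		.value_size	= sizeof(unsigned int),
		.max_entries	= 64,	/* >= number of possible CPUs */
	};

	SEC("func=sys_nanosleep")	/* kprobe config syntax used by perf's BPF loader */
	int on_nanosleep(void *ctx)
	{
		char msg[] = "hello from test_bpf_trace.c\n";
		int cpu = get_smp_processor_id();

		/* emit the message through this CPU's slot of __bpf_stdout__ */
		perf_event_output(ctx, &__bpf_stdout__, cpu, &msg, sizeof(msg));
		return 0;
	}

	char _license[] SEC("license") = "GPL";
	int _version SEC("version") = 0x40600;	/* should match the running kernel */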
This commit: d78885739a (parent: f9383452a2)
tools/perf/builtin-record.c
@@ -1276,6 +1276,14 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
	if (err)
		return err;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n",
		       errbuf);
		return err;
	}

	err = -ENOMEM;

	symbol__init(NULL);
tools/perf/builtin-trace.c
@@ -3273,6 +3273,13 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
					trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);

	err = bpf__setup_stdout(trace.evlist);
	if (err) {
		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
		goto out;
	}

	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
tools/perf/util/bpf-loader.c
@@ -842,6 +842,58 @@ bpf_map_op__new(struct parse_events_term *term)
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("No enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}

static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
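The clone helpers above make a two-level copy: memdup() duplicates the bpf_map_op itself, and a second memdup() duplicates the ranges array it may point to, so each map ends up owning its private data independently of the template it was cloned from. A self-contained sketch of the same ownership pattern with plain libc calls (illustrative only, not perf code; all names are made up):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct range { int start, end; };

	struct op {
		size_t nr_ranges;
		struct range *ranges;	/* heap-allocated, owned by this op */
	};

	/* Deep-copy 'src': clone the struct, then clone the array it points to. */
	static struct op *op_clone(const struct op *src)
	{
		struct op *dst = malloc(sizeof(*dst));

		if (!dst)
			return NULL;
		memcpy(dst, src, sizeof(*dst));	/* shallow copy: dst->ranges still aliases src's */

		if (src->ranges) {
			size_t sz = src->nr_ranges * sizeof(src->ranges[0]);

			dst->ranges = malloc(sz);
			if (!dst->ranges) {	/* unwind the partial clone on failure */
				free(dst);
				return NULL;
			}
			memcpy(dst->ranges, src->ranges, sz);
		}
		return dst;
	}

	int main(void)
	{
		struct range r[] = { { 1, 3 }, { 10, 12 } };
		struct op tmpl = { .nr_ranges = 2, .ranges = r };
		struct op *copy = op_clone(&tmpl);

		if (copy) {
			copy->ranges[0].start = 100;	/* does not touch tmpl */
			printf("tmpl=%d copy=%d\n", tmpl.ranges[0].start, copy->ranges[0].start);
			free(copy->ranges);
			free(copy);
		}
		return 0;
	}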
@@ -1417,6 +1469,70 @@ int bpf__apply_obj_config(void)
	return 0;
}

#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_map__for_each(pos, obj)

#define bpf__for_each_stdout_map(pos, obj, objtmp)	\
	bpf__for_each_map(pos, obj, objtmp)		\
		if (bpf_map__get_name(pos) &&		\
			(strcmp("__bpf_stdout__",	\
				bpf_map__get_name(pos)) == 0))

int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv;

		err = bpf_map__get_private(map, (void **)&priv);
		if (err)
			return -BPF_LOADER_ERRNO__INTERNAL;

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return 0;

	if (!tmpl_priv)
		return 0;

	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv;

		err = bpf_map__get_private(map, (void **)&priv);
		if (err)
			return -BPF_LOADER_ERRNO__INTERNAL;
		if (priv)
			continue;

		priv = bpf_map_priv__clone(tmpl_priv);
		if (!priv)
			return -ENOMEM;

		err = bpf_map__set_private(map, priv, bpf_map_priv__clear);
		if (err) {
			bpf_map_priv__clear(map, priv);
			return err;
		}
	}

	return 0;
}

#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
@@ -1590,3 +1706,11 @@ int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}
tools/perf/util/bpf-loader.h
@@ -79,6 +79,11 @@ int bpf__strerror_config_obj(struct bpf_object *obj,
			     size_t size);
int bpf__apply_obj_config(void);
int bpf__strerror_apply_obj_config(int err, char *buf, size_t size);

int bpf__setup_stdout(struct perf_evlist *evlist);
int bpf__strerror_setup_stdout(struct perf_evlist *evlist, int err,
			       char *buf, size_t size);

#else
static inline struct bpf_object *
bpf__prepare_load(const char *filename __maybe_unused,
@@ -124,6 +129,12 @@ bpf__apply_obj_config(void)
	return 0;
}

static inline int
bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static inline int
__bpf_strerror(char *buf, size_t size)
{
@@ -177,5 +188,13 @@ bpf__strerror_apply_obj_config(int err __maybe_unused,
{
	return __bpf_strerror(buf, size);
}

static inline int
bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
			   int err __maybe_unused, char *buf,
			   size_t size)
{
	return __bpf_strerror(buf, size);
}
#endif
#endif