perf thread_map: Don't access the array entries directly
Instead provide a method to set the array entries, and another to access the contents.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1435012588-9007-2-git-send-email-jolsa@kernel.org
[ Split providing the set/get accessors from transforming the entries structs ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 7c31bb8c95
commit e13798c77b
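The patch swaps every direct read or write of threads->map[idx] for the thread_map__pid() / thread_map__set_pid() accessors added to thread_map.h at the end of this diff, so a later change can rework the entry layout without touching each call site again. A minimal usage sketch of the new accessors follows; it is a standalone illustration, not part of the patch, and filter_example is a hypothetical caller (the perf-internal thread_map.h header is assumed to be on the include path):

	#include <sys/types.h>   /* pid_t */
	#include <unistd.h>      /* getpid() */
	#include "thread_map.h"  /* struct thread_map plus the new accessors */

	/* Hypothetical caller, for illustration only. */
	static void filter_example(struct thread_map *threads)
	{
		/* old style, now avoided: pid_t pid = threads->map[0]; */

		/* new style: read an entry through the accessor */
		pid_t pid = thread_map__pid(threads, 0);

		/* a dummy map carries -1; point it at the current process instead */
		if (pid == -1)
			thread_map__set_pid(threads, 0, getpid());
	}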
@@ -2325,7 +2325,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 	 */
 	if (trace->filter_pids.nr > 0)
 		err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
-	else if (evlist->threads->map[0] == -1)
+	else if (thread_map__pid(evlist->threads, 0) == -1)
 		err = perf_evlist__set_filter_pid(evlist, getpid());
 
 	if (err < 0) {
@@ -2343,7 +2343,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 	if (forks)
 		perf_evlist__start_workload(evlist);
 
-	trace->multiple_threads = evlist->threads->map[0] == -1 ||
+	trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
 				  evlist->threads->nr > 1 ||
 				  perf_evlist__first(evlist)->attr.inherit;
 again:
@@ -45,7 +45,7 @@ int test__syscall_openat_tp_fields(void)
 
 	perf_evsel__config(evsel, &opts);
 
-	evlist->threads->map[0] = getpid();
+	thread_map__set_pid(evlist->threads, 0, getpid());
 
 	err = perf_evlist__open(evlist);
 	if (err < 0) {
@@ -119,12 +119,12 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
 	if (per_cpu) {
 		mp->cpu = evlist->cpus->map[idx];
 		if (evlist->threads)
-			mp->tid = evlist->threads->map[0];
+			mp->tid = thread_map__pid(evlist->threads, 0);
 		else
 			mp->tid = -1;
 	} else {
 		mp->cpu = -1;
-		mp->tid = evlist->threads->map[idx];
+		mp->tid = thread_map__pid(evlist->threads, idx);
 	}
 }
 
@@ -504,7 +504,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
 	for (thread = 0; thread < threads->nr; ++thread) {
 		if (__event__synthesize_thread(comm_event, mmap_event,
 					       fork_event,
-					       threads->map[thread], 0,
+					       thread_map__pid(threads, thread), 0,
 					       process, tool, machine,
 					       mmap_data, proc_map_timeout)) {
 			err = -1;
@@ -515,12 +515,12 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
 		 * comm.pid is set to thread group id by
 		 * perf_event__synthesize_comm
 		 */
-		if ((int) comm_event->comm.pid != threads->map[thread]) {
+		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
 			bool need_leader = true;
 
 			/* is thread group leader in thread_map? */
 			for (j = 0; j < threads->nr; ++j) {
-				if ((int) comm_event->comm.pid == threads->map[j]) {
+				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
 					need_leader = false;
 					break;
 				}
@@ -548,7 +548,7 @@ static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
 	else
 		sid->cpu = -1;
 	if (!evsel->system_wide && evlist->threads && thread >= 0)
-		sid->tid = evlist->threads->map[thread];
+		sid->tid = thread_map__pid(evlist->threads, thread);
 	else
 		sid->tid = -1;
 }
@@ -1475,7 +1475,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar
 				  __func__, __LINE__);
 			goto out_close_pipes;
 		}
-		evlist->threads->map[0] = evlist->workload.pid;
+		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
 	}
 
 	close(child_ready_pipe[1]);
@@ -1167,7 +1167,7 @@ retry_sample_id:
 			int group_fd;
 
 			if (!evsel->cgrp && !evsel->system_wide)
-				pid = threads->map[thread];
+				pid = thread_map__pid(threads, thread);
 
 			group_fd = get_group_fd(evsel, cpu, thread);
 retry_open:
@@ -45,7 +45,7 @@ struct thread_map *thread_map__new_by_pid(pid_t pid)
 	threads = thread_map__alloc(items);
 	if (threads != NULL) {
 		for (i = 0; i < items; i++)
-			threads->map[i] = atoi(namelist[i]->d_name);
+			thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
 		threads->nr = items;
 	}
 
@@ -61,8 +61,8 @@ struct thread_map *thread_map__new_by_tid(pid_t tid)
 	struct thread_map *threads = thread_map__alloc(1);
 
 	if (threads != NULL) {
-		threads->map[0] = tid;
-		threads->nr = 1;
+		thread_map__set_pid(threads, 0, tid);
+		threads->nr = 1;
 	}
 
 	return threads;
@@ -123,8 +123,10 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
 		threads = tmp;
 	}
 
-	for (i = 0; i < items; i++)
-		threads->map[threads->nr + i] = atoi(namelist[i]->d_name);
+	for (i = 0; i < items; i++) {
+		thread_map__set_pid(threads, threads->nr + i,
+				    atoi(namelist[i]->d_name));
+	}
 
 	for (i = 0; i < items; i++)
 		zfree(&namelist[i]);
@@ -201,7 +203,7 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
 		threads = nt;
 
 		for (i = 0; i < items; i++) {
-			threads->map[j++] = atoi(namelist[i]->d_name);
+			thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
 			zfree(&namelist[i]);
 		}
 		threads->nr = total_tasks;
@@ -227,8 +229,8 @@ struct thread_map *thread_map__new_dummy(void)
 	struct thread_map *threads = thread_map__alloc(1);
 
 	if (threads != NULL) {
-		threads->map[0] = -1;
-		threads->nr = 1;
+		thread_map__set_pid(threads, 0, -1);
+		threads->nr = 1;
 	}
 	return threads;
 }
@@ -267,8 +269,8 @@ static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
 			goto out_free_threads;
 
 		threads = nt;
-		threads->map[ntasks - 1] = tid;
-		threads->nr = ntasks;
+		thread_map__set_pid(threads, ntasks - 1, tid);
+		threads->nr = ntasks;
 	}
 out:
 	return threads;
@@ -301,7 +303,7 @@ size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
 	size_t printed = fprintf(fp, "%d thread%s: ",
 				 threads->nr, threads->nr > 1 ? "s" : "");
 	for (i = 0; i < threads->nr; ++i)
-		printed += fprintf(fp, "%s%d", i ? ", " : "", threads->map[i]);
+		printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i));
 
 	return printed + fprintf(fp, "\n");
 }
 
@@ -27,4 +27,14 @@ static inline int thread_map__nr(struct thread_map *threads)
 	return threads ? threads->nr : 1;
 }
 
+static inline pid_t thread_map__pid(struct thread_map *map, int thread)
+{
+	return map->map[thread];
+}
+
+static inline void
+thread_map__set_pid(struct thread_map *map, int thread, pid_t pid)
+{
+	map->map[thread] = pid;
+}
 #endif /* __PERF_THREAD_MAP_H */