perf kwork top: Introduce new top utility
Some common tools for collecting statistics on CPU usage, such as top,
obtain statistics from timer interrupt sampling and then periodically
read them from /proc/stat. This method has some deviations:

1. In the tick interrupt, the time between the last tick and the current
   tick is counted toward the current task, yet the task may have been
   running for only part of that interval.
2. The top tool periodically reads /proc/{PID}/status for each task, so
   tasks with a short life cycle may be missed entirely.

In conclusion, the top tool cannot accurately collect statistics on the
CPU usage and running time of tasks.

The statistical method based on the sched_switch tracepoint can
accurately calculate the CPU usage of all tasks. It is suited to
scenarios that require high-precision performance comparison data.

Example usage:

  # perf kwork

   Usage: perf kwork [<options>] {record|report|latency|timehist|top}

      -D, --dump-raw-trace  dump raw trace in ASCII
      -f, --force           don't complain, do it
      -k, --kwork <kwork>   list of kwork to profile (irq, softirq, workqueue, sched, etc)
      -v, --verbose         be more verbose (show symbol address, etc)

  # perf kwork -k sched record -- perf bench sched messaging -g 1 -l 10000
  # Running 'sched/messaging' benchmark:
  # 20 sender and receiver processes per group
  # 1 groups == 40 processes run

       Total time: 14.074 [sec]
  [ perf record: Woken up 1 times to write data ]
  [ perf record: Captured and wrote 15.886 MB perf.data (129472 samples) ]

  # perf kwork top

  Total  : 115708.178 ms, 8 cpus
  %Cpu(s):   9.78% id
  %Cpu0   [|||||||||||||||||||||||||||      90.55%]
  %Cpu1   [|||||||||||||||||||||||||||      90.51%]
  %Cpu2   [||||||||||||||||||||||||||       88.57%]
  %Cpu3   [|||||||||||||||||||||||||||      91.18%]
  %Cpu4   [|||||||||||||||||||||||||||      91.09%]
  %Cpu5   [|||||||||||||||||||||||||||      90.88%]
  %Cpu6   [||||||||||||||||||||||||||       88.64%]
  %Cpu7   [|||||||||||||||||||||||||||      90.28%]

        PID    %CPU           RUNTIME  COMMMAND
    ----------------------------------------------------
       4113   22.23       3221.547 ms  sched-messaging
       4105   21.61       3131.495 ms  sched-messaging
       4119   21.53       3120.937 ms  sched-messaging
       4103   21.39       3101.614 ms  sched-messaging
       4106   21.37       3095.209 ms  sched-messaging
       4104   21.25       3077.269 ms  sched-messaging
       4115   21.21       3073.188 ms  sched-messaging
       4109   21.18       3069.022 ms  sched-messaging
       4111   20.78       3010.033 ms  sched-messaging
       4114   20.74       3007.073 ms  sched-messaging
       4108   20.73       3002.137 ms  sched-messaging
       4107   20.47       2967.292 ms  sched-messaging
       4117   20.39       2955.335 ms  sched-messaging
       4112   20.34       2947.080 ms  sched-messaging
       4118   20.32       2942.519 ms  sched-messaging
       4121   20.23       2929.865 ms  sched-messaging
       4110   20.22       2930.078 ms  sched-messaging
       4122   20.15       2919.542 ms  sched-messaging
       4120   19.77       2866.032 ms  sched-messaging
       4116   19.72       2857.660 ms  sched-messaging
       4127   16.19       2346.334 ms  sched-messaging
       4142   15.86       2297.600 ms  sched-messaging
       4141   15.62       2262.646 ms  sched-messaging
       4136   15.41       2231.408 ms  sched-messaging
       4130   15.38       2227.008 ms  sched-messaging
       4129   15.31       2217.692 ms  sched-messaging
       4126   15.21       2201.711 ms  sched-messaging
       4139   15.19       2200.722 ms  sched-messaging
       4137   15.10       2188.633 ms  sched-messaging
       4134   15.06       2182.082 ms  sched-messaging
       4132   15.02       2177.530 ms  sched-messaging
       4131   14.73       2131.973 ms  sched-messaging
       4125   14.68       2125.439 ms  sched-messaging
       4128   14.66       2122.255 ms  sched-messaging
       4123   14.65       2122.113 ms  sched-messaging
       4135   14.56       2107.144 ms  sched-messaging
       4133   14.51       2103.549 ms  sched-messaging
       4124   14.27       2066.671 ms  sched-messaging
       4140   14.17       2052.251 ms  sched-messaging
       4138   13.81       2000.361 ms  sched-messaging
          0   11.42       1652.009 ms  swapper/2
          0   11.35       1641.694 ms  swapper/6
          0    9.71       1405.108 ms  swapper/7
          0    9.48       1372.338 ms  swapper/1
          0    9.44       1366.013 ms  swapper/0
          0    9.11       1318.382 ms  swapper/5
          0    8.90       1287.582 ms  swapper/4
          0    8.81       1274.356 ms  swapper/3
       4100    2.61        379.328 ms  perf
       4101    1.16        169.487 ms  perf-exec
        151    0.65         94.741 ms  systemd-resolve
        249    0.36         53.030 ms  sd-resolve
        153    0.14         21.405 ms  systemd-timesyn
          1    0.10         16.200 ms  systemd
         16    0.09         15.785 ms  rcu_preempt
       4102    0.06          9.727 ms  perf
       4095    0.03          5.464 ms  kworker/7:1
         98    0.02          3.231 ms  jbd2/sda-8
        353    0.02          4.115 ms  sshd
         75    0.02          3.889 ms  kworker/2:1
         73    0.01          1.552 ms  kworker/5:1
         64    0.01          1.591 ms  kworker/4:1
         74    0.01          1.952 ms  kworker/3:1
         61    0.01          2.608 ms  kcompactd0
        397    0.01          1.602 ms  kworker/1:1
         69    0.01          1.817 ms  kworker/1:1H
         10    0.01          2.553 ms  kworker/u16:0
       2909    0.01          2.684 ms  kworker/0:2
       1211    0.00          0.426 ms  kworker/7:0
         97    0.00          0.153 ms  kworker/7:1H
         51    0.00          0.100 ms  ksoftirqd/7
        120    0.00          0.856 ms  systemd-journal
         76    0.00          1.414 ms  kworker/6:1
         46    0.00          0.246 ms  ksoftirqd/6
         45    0.00          0.164 ms  migration/6
         41    0.00          0.098 ms  ksoftirqd/5
         40    0.00          0.207 ms  migration/5
         86    0.00          1.339 ms  kworker/4:1H
         36    0.00          0.252 ms  ksoftirqd/4
         35    0.00          0.090 ms  migration/4
         31    0.00          0.156 ms  ksoftirqd/3
         30    0.00          0.073 ms  migration/3
         26    0.00          0.180 ms  ksoftirqd/2
         25    0.00          0.085 ms  migration/2
         21    0.00          0.106 ms  ksoftirqd/1
         20    0.00          0.118 ms  migration/1
        302    0.00          1.440 ms  systemd-logind
         17    0.00          0.132 ms  migration/0
         15    0.00          0.255 ms  ksoftirqd/0

Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Yang Jihong <yangjihong1@huawei.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Link: https://lore.kernel.org/r/20230812084917.169338-10-yangjihong1@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
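[Editor's note] To make the sched_switch-based method concrete: every context
switch carries a timestamp, and the interval between a task's switch-in and
switch-out is attributed to that task in full, so neither partial ticks nor
short-lived tasks are lost. A minimal, hypothetical sketch of this bookkeeping
(standalone illustration, not the patch code, though it mirrors the
top_update_runtime() logic added below; struct and function names are invented):

  #include <stdint.h>

  /* Hypothetical per-task accounting record. */
  struct task_stat {
          uint64_t switch_in_time; /* last sched-in timestamp, 0 if off CPU */
          uint64_t total_runtime;  /* accumulated on-CPU time, in ns */
  };

  /* On every sched_switch event: close the interval of the task leaving
   * the CPU and open one for the task entering it. Intervals are bounded
   * by real context switches, so short-lived tasks cannot be missed. */
  static void on_sched_switch(struct task_stat *prev, struct task_stat *next,
                              uint64_t now)
  {
          if (prev->switch_in_time != 0 && now >= prev->switch_in_time)
                  prev->total_runtime += now - prev->switch_in_time;
          prev->switch_in_time = 0;
          next->switch_in_time = now;
  }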
This commit is contained in:
parent b83b5071c0
commit 55c40e5052
@@ -8,7 +8,7 @@ perf-kwork - Tool to trace/measure kernel work properties (latencies)
 SYNOPSIS
 --------
 [verse]
-'perf kwork' {record|report|latency|timehist}
+'perf kwork' {record|report|latency|timehist|top}
 
 DESCRIPTION
 -----------
@@ -23,6 +23,8 @@ There are several variants of 'perf kwork':
 
   'perf kwork timehist' provides an analysis of kernel work events.
 
+  'perf kwork top' to report the task cpu usage.
+
   Example usage:
       perf kwork record -- sleep 1
       perf kwork report
@@ -30,6 +32,7 @@ There are several variants of 'perf kwork':
       perf kwork latency
       perf kwork latency -b
       perf kwork timehist
+      perf kwork top
 
 By default it shows the individual work events such as irq, workqeueu,
 including the run time and delay (time between raise and actually entry):
@@ -45,6 +45,11 @@
 #define PRINT_BRACKETPAIR_WIDTH 2
 #define PRINT_TIME_UNIT_SEC_WIDTH 2
 #define PRINT_TIME_UNIT_MESC_WIDTH 3
+#define PRINT_PID_WIDTH 7
+#define PRINT_TASK_NAME_WIDTH 16
+#define PRINT_CPU_USAGE_WIDTH 6
+#define PRINT_CPU_USAGE_DECIMAL_WIDTH 2
+#define PRINT_CPU_USAGE_HIST_WIDTH 30
 #define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
 #define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
 #define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
@@ -131,6 +136,16 @@ static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 	return 0;
 }
 
+static int cpu_usage_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+	if (l->cpu_usage > r->cpu_usage)
+		return 1;
+	if (l->cpu_usage < r->cpu_usage)
+		return -1;
+
+	return 0;
+}
+
 static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
 			       const char *tok, struct list_head *list)
 {
@@ -155,12 +170,17 @@ static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
 		.name = "avg",
 		.cmp = avg_latency_cmp,
 	};
+	static struct sort_dimension rate_sort_dimension = {
+		.name = "rate",
+		.cmp = cpu_usage_cmp,
+	};
 	struct sort_dimension *available_sorts[] = {
 		&id_sort_dimension,
 		&max_sort_dimension,
 		&count_sort_dimension,
 		&runtime_sort_dimension,
 		&avg_sort_dimension,
+		&rate_sort_dimension,
 	};
 
 	if (kwork->report == KWORK_REPORT_LATENCY)
@@ -485,6 +505,38 @@ static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
 	return NULL;
 }
 
+static struct kwork_work *find_work_by_id(struct rb_root_cached *root,
+					  u64 id, int cpu)
+{
+	struct rb_node *next;
+	struct kwork_work *work;
+
+	next = rb_first_cached(root);
+	while (next) {
+		work = rb_entry(next, struct kwork_work, node);
+		if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
+		    (cpu == -1 && work->id == id))
+			return work;
+
+		next = rb_next(next);
+	}
+
+	return NULL;
+}
+
+static struct kwork_class *get_kwork_class(struct perf_kwork *kwork,
+					   enum kwork_class_type type)
+{
+	struct kwork_class *class;
+
+	list_for_each_entry(class, &kwork->class_list, list) {
+		if (class->type == type)
+			return class;
+	}
+
+	return NULL;
+}
+
 static void report_update_exit_event(struct kwork_work *work,
 				     struct kwork_atom *atom,
 				     struct perf_sample *sample)
@@ -789,6 +841,54 @@ out:
 	return ret;
 }
 
+static void top_update_runtime(struct kwork_work *work,
+			       struct kwork_atom *atom,
+			       struct perf_sample *sample)
+{
+	u64 delta;
+	u64 exit_time = sample->time;
+	u64 entry_time = atom->time;
+
+	if ((entry_time != 0) && (exit_time >= entry_time)) {
+		delta = exit_time - entry_time;
+		work->total_runtime += delta;
+	}
+}
+
+static int top_entry_event(struct perf_kwork *kwork,
+			   struct kwork_class *class,
+			   struct evsel *evsel,
+			   struct perf_sample *sample,
+			   struct machine *machine)
+{
+	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
+			      KWORK_TRACE_MAX, evsel, sample,
+			      machine, NULL, true);
+}
+
+static int top_sched_switch_event(struct perf_kwork *kwork,
+				  struct kwork_class *class,
+				  struct evsel *evsel,
+				  struct perf_sample *sample,
+				  struct machine *machine)
+{
+	struct kwork_atom *atom;
+	struct kwork_work *work;
+
+	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+			     KWORK_TRACE_ENTRY, evsel, sample,
+			     machine, &work);
+	if (!work)
+		return -1;
+
+	if (atom) {
+		top_update_runtime(work, atom, sample);
+		atom_del(atom);
+	}
+
+	return top_entry_event(kwork, class, evsel, sample, machine);
+}
+
 static struct kwork_class kwork_irq;
 static int process_irq_handler_entry_event(struct perf_tool *tool,
 					   struct evsel *evsel,
@@ -1378,6 +1478,101 @@ static void print_bad_events(struct perf_kwork *kwork)
 	}
 }
 
+const char *graph_load = "||||||||||||||||||||||||||||||||||||||||||||||||";
+const char *graph_idle = "                                                ";
+static void top_print_per_cpu_load(struct perf_kwork *kwork)
+{
+	int i, load_width;
+	u64 total, load, load_ratio;
+	struct kwork_top_stat *stat = &kwork->top_stat;
+
+	for (i = 0; i < MAX_NR_CPUS; i++) {
+		total = stat->cpus_runtime[i].total;
+		load = stat->cpus_runtime[i].load;
+		if (test_bit(i, stat->all_cpus_bitmap) && total) {
+			load_ratio = load * 10000 / total;
+			load_width = PRINT_CPU_USAGE_HIST_WIDTH *
+				load_ratio / 10000;
+
+			printf("%%Cpu%-*d[%.*s%.*s %*.*f%%]\n",
+			       PRINT_CPU_WIDTH, i,
+			       load_width, graph_load,
+			       PRINT_CPU_USAGE_HIST_WIDTH - load_width,
+			       graph_idle,
+			       PRINT_CPU_USAGE_WIDTH,
+			       PRINT_CPU_USAGE_DECIMAL_WIDTH,
+			       (double)load_ratio / 100);
+		}
+	}
+}
+
+static void top_print_cpu_usage(struct perf_kwork *kwork)
+{
+	struct kwork_top_stat *stat = &kwork->top_stat;
+	u64 idle_time = stat->cpus_runtime[MAX_NR_CPUS].idle;
+	int cpus_nr = bitmap_weight(stat->all_cpus_bitmap, MAX_NR_CPUS);
+	u64 cpus_total_time = stat->cpus_runtime[MAX_NR_CPUS].total;
+
+	printf("Total  : %*.*f ms, %d cpus\n",
+	       PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+	       (double)cpus_total_time / NSEC_PER_MSEC,
+	       cpus_nr);
+
+	printf("%%Cpu(s): %*.*f%% id\n",
+	       PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
+	       cpus_total_time ? (double)idle_time * 100 / cpus_total_time : 0);
+
+	top_print_per_cpu_load(kwork);
+}
+
+static void top_print_header(struct perf_kwork *kwork __maybe_unused)
+{
+	int ret;
+
+	printf("\n ");
+	ret = printf(" %*s %*s %*s %-*s",
+		     PRINT_PID_WIDTH, "PID",
+		     PRINT_CPU_USAGE_WIDTH, "%CPU",
+		     PRINT_RUNTIME_HEADER_WIDTH + RPINT_DECIMAL_WIDTH, "RUNTIME",
+		     PRINT_TASK_NAME_WIDTH, "COMMMAND");
+	printf("\n ");
+	print_separator(ret);
+}
+
+static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_work *work)
+{
+	int ret = 0;
+
+	printf(" ");
+
+	/*
+	 * pid
+	 */
+	ret += printf(" %*ld ", PRINT_PID_WIDTH, work->id);
+
+	/*
+	 * cpu usage
+	 */
+	ret += printf(" %*.*f ",
+		      PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
+		      (double)work->cpu_usage / 100);
+
+	/*
+	 * total runtime
+	 */
+	ret += printf(" %*.*f ms ",
+		      PRINT_RUNTIME_WIDTH + RPINT_DECIMAL_WIDTH, RPINT_DECIMAL_WIDTH,
+		      (double)work->total_runtime / NSEC_PER_MSEC);
+
+	/*
+	 * command
+	 */
+	ret += printf(" %-*s", PRINT_TASK_NAME_WIDTH, work->name);
+
+	printf("\n");
+	return ret;
+}
+
 static void work_sort(struct perf_kwork *kwork,
 		      struct kwork_class *class, struct rb_root_cached *root)
 {
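[Editor's note] A note on the arithmetic in the printing code above: ratios are
carried as integers in units of 0.01% (10000 == 100.00%), so load * 10000 / total
stays in integer math during aggregation and is converted to a double only at
print time, where dividing by 100 restores two decimal places. A standalone
illustration (hypothetical values, picked to reproduce the swapper/2 line from
the sample output in the commit message):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t load  = 1652009000ULL;  /* ns on CPU: 1652.009 ms */
          uint64_t total = 14463522250ULL; /* ns of wall time on that CPU */

          /* basis points: 10000 == 100.00%; no floating point yet */
          uint64_t ratio = load * 10000 / total;

          /* prints "11.42%", matching the report's two decimal places */
          printf("%.2f%%\n", (double)ratio / 100);
          return 0;
  }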
@@ -1425,6 +1620,9 @@ static int perf_kwork__check_config(struct perf_kwork *kwork,
 		.entry_event = timehist_entry_event,
 		.exit_event = timehist_exit_event,
 	};
+	static struct trace_kwork_handler top_ops = {
+		.sched_switch_event = top_sched_switch_event,
+	};
 
 	switch (kwork->report) {
 	case KWORK_REPORT_RUNTIME:
@@ -1436,6 +1634,9 @@ static int perf_kwork__check_config(struct perf_kwork *kwork,
 	case KWORK_REPORT_TIMEHIST:
 		kwork->tp_handler = &timehist_ops;
 		break;
+	case KWORK_REPORT_TOP:
+		kwork->tp_handler = &top_ops;
+		break;
 	default:
 		pr_debug("Invalid report type %d\n", kwork->report);
 		return -1;
@@ -1682,6 +1883,169 @@ static int perf_kwork__timehist(struct perf_kwork *kwork)
 	return perf_kwork__read_events(kwork);
 }
 
+static void top_calc_total_runtime(struct perf_kwork *kwork)
+{
+	struct kwork_class *class;
+	struct kwork_work *work;
+	struct rb_node *next;
+	struct kwork_top_stat *stat = &kwork->top_stat;
+
+	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
+	if (!class)
+		return;
+
+	next = rb_first_cached(&class->work_root);
+	while (next) {
+		work = rb_entry(next, struct kwork_work, node);
+		BUG_ON(work->cpu >= MAX_NR_CPUS);
+		stat->cpus_runtime[work->cpu].total += work->total_runtime;
+		stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime;
+		next = rb_next(next);
+	}
+}
+
+static void top_calc_idle_time(struct perf_kwork *kwork,
+			       struct kwork_work *work)
+{
+	struct kwork_top_stat *stat = &kwork->top_stat;
+
+	if (work->id == 0) {
+		stat->cpus_runtime[work->cpu].idle += work->total_runtime;
+		stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime;
+	}
+}
+
+static void top_calc_cpu_usage(struct perf_kwork *kwork)
+{
+	struct kwork_class *class;
+	struct kwork_work *work;
+	struct rb_node *next;
+	struct kwork_top_stat *stat = &kwork->top_stat;
+
+	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
+	if (!class)
+		return;
+
+	next = rb_first_cached(&class->work_root);
+	while (next) {
+		work = rb_entry(next, struct kwork_work, node);
+
+		if (work->total_runtime == 0)
+			goto next;
+
+		__set_bit(work->cpu, stat->all_cpus_bitmap);
+
+		work->cpu_usage = work->total_runtime * 10000 /
+			stat->cpus_runtime[work->cpu].total;
+
+		top_calc_idle_time(kwork, work);
+next:
+		next = rb_next(next);
+	}
+}
+
+static void top_calc_load_runtime(struct perf_kwork *kwork,
+				  struct kwork_work *work)
+{
+	struct kwork_top_stat *stat = &kwork->top_stat;
+
+	if (work->id != 0) {
+		stat->cpus_runtime[work->cpu].load += work->total_runtime;
+		stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime;
+	}
+}
+
+static void top_merge_tasks(struct perf_kwork *kwork)
+{
+	struct kwork_work *merged_work, *data;
+	struct kwork_class *class;
+	struct rb_node *node;
+	int cpu;
+	struct rb_root_cached merged_root = RB_ROOT_CACHED;
+
+	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
+	if (!class)
+		return;
+
+	for (;;) {
+		node = rb_first_cached(&class->work_root);
+		if (!node)
+			break;
+
+		rb_erase_cached(node, &class->work_root);
+		data = rb_entry(node, struct kwork_work, node);
+
+		cpu = data->cpu;
+		merged_work = find_work_by_id(&merged_root, data->id,
+					      data->id == 0 ? cpu : -1);
+		if (!merged_work) {
+			work_insert(&merged_root, data, &kwork->cmp_id);
+		} else {
+			merged_work->total_runtime += data->total_runtime;
+			merged_work->cpu_usage += data->cpu_usage;
+		}
+
+		top_calc_load_runtime(kwork, data);
+	}
+
+	work_sort(kwork, class, &merged_root);
+}
+
+static void perf_kwork__top_report(struct perf_kwork *kwork)
+{
+	struct kwork_work *work;
+	struct rb_node *next;
+
+	printf("\n");
+
+	top_print_cpu_usage(kwork);
+	top_print_header(kwork);
+	next = rb_first_cached(&kwork->sorted_work_root);
+	while (next) {
+		work = rb_entry(next, struct kwork_work, node);
+		process_skipped_events(kwork, work);
+
+		if (work->total_runtime == 0)
+			goto next;
+
+		top_print_work(kwork, work);
+
+next:
+		next = rb_next(next);
+	}
+
+	printf("\n");
+}
+
+static int perf_kwork__top(struct perf_kwork *kwork)
+{
+	struct __top_cpus_runtime *cpus_runtime;
+	int ret = 0;
+
+	cpus_runtime = zalloc(sizeof(struct __top_cpus_runtime) * (MAX_NR_CPUS + 1));
+	if (!cpus_runtime)
+		return -1;
+
+	kwork->top_stat.cpus_runtime = cpus_runtime;
+	bitmap_zero(kwork->top_stat.all_cpus_bitmap, MAX_NR_CPUS);
+
+	ret = perf_kwork__read_events(kwork);
+	if (ret)
+		goto out;
+
+	top_calc_total_runtime(kwork);
+	top_calc_cpu_usage(kwork);
+	top_merge_tasks(kwork);
+
+	setup_pager();
+
+	perf_kwork__top_report(kwork);
+
+out:
+	free(kwork->top_stat.cpus_runtime);
+	return ret;
+}
+
 static void setup_event_list(struct perf_kwork *kwork,
 			     const struct option *options,
 			     const char * const usage_msg[])
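[Editor's note] One subtlety in top_merge_tasks() above: entries for an ordinary
task (the same pid observed on several CPUs) are folded into a single row, while
the idle task is pid 0 on every CPU, so it is looked up with the CPU as part of
the key and kept as one row per CPU — hence the separate swapper/N lines in the
sample output. A hypothetical restatement of that key choice (struct and
function names invented for illustration):

  #include <stdint.h>

  struct merge_key {
          uint64_t id; /* pid */
          int cpu;     /* -1 acts as an "any CPU" wildcard */
  };

  /* Mirrors find_work_by_id(..., data->id == 0 ? cpu : -1) above:
   * pid 0 (idle) keys on (pid, cpu); everything else keys on pid only. */
  static struct merge_key make_key(uint64_t id, int cpu)
  {
          struct merge_key key = { .id = id, .cpu = (id == 0) ? cpu : -1 };
          return key;
  }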
@@ -1801,6 +2165,7 @@ int cmd_kwork(int argc, const char **argv)
 	};
 	static const char default_report_sort_order[] = "runtime, max, count";
 	static const char default_latency_sort_order[] = "avg, max, count";
+	static const char default_top_sort_order[] = "rate, runtime";
 	const struct option kwork_options[] = {
 	OPT_INCR('v', "verbose", &verbose,
 		 "be more verbose (show symbol address, etc)"),
@@ -1868,6 +2233,9 @@ int cmd_kwork(int argc, const char **argv)
 		   "input file name"),
 	OPT_PARENT(kwork_options)
 	};
+	const struct option top_options[] = {
+	OPT_PARENT(kwork_options)
+	};
 	const char *kwork_usage[] = {
 		NULL,
 		NULL
@@ -1884,8 +2252,12 @@ int cmd_kwork(int argc, const char **argv)
 		"perf kwork timehist [<options>]",
 		NULL
 	};
+	const char * const top_usage[] = {
+		"perf kwork top [<options>]",
+		NULL
+	};
 	const char *const kwork_subcommands[] = {
-		"record", "report", "latency", "timehist", NULL
+		"record", "report", "latency", "timehist", "top", NULL
 	};
 
 	argc = parse_options_subcommand(argc, argv, kwork_options,
@@ -1930,6 +2302,19 @@ int cmd_kwork(int argc, const char **argv)
 		kwork.report = KWORK_REPORT_TIMEHIST;
 		setup_event_list(&kwork, kwork_options, kwork_usage);
 		return perf_kwork__timehist(&kwork);
+	} else if (strlen(argv[0]) > 2 && strstarts("top", argv[0])) {
+		kwork.sort_order = default_top_sort_order;
+		if (argc > 1) {
+			argc = parse_options(argc, argv, top_options, top_usage, 0);
+			if (argc)
+				usage_with_options(top_usage, top_options);
+		}
+		kwork.report = KWORK_REPORT_TOP;
+		if (!kwork.event_list_str)
+			kwork.event_list_str = "sched";
+		setup_event_list(&kwork, kwork_options, kwork_usage);
+		setup_sorting(&kwork, top_options, top_usage);
+		return perf_kwork__top(&kwork);
 	} else
 		usage_with_options(kwork_usage, kwork_options);
 
@@ -24,6 +24,7 @@ enum kwork_report_type {
 	KWORK_REPORT_RUNTIME,
 	KWORK_REPORT_LATENCY,
 	KWORK_REPORT_TIMEHIST,
+	KWORK_REPORT_TOP,
 };
 
 enum kwork_trace_type {
@@ -129,6 +130,11 @@ struct kwork_work {
 	u64 max_latency_start;
 	u64 max_latency_end;
 	u64 total_latency;
+
+	/*
+	 * top report
+	 */
+	u32 cpu_usage;
 };
 
 struct kwork_class {
@@ -174,6 +180,17 @@ struct trace_kwork_handler {
 			    struct perf_sample *sample, struct machine *machine);
 };
 
+struct __top_cpus_runtime {
+	u64 load;
+	u64 idle;
+	u64 total;
+};
+
+struct kwork_top_stat {
+	DECLARE_BITMAP(all_cpus_bitmap, MAX_NR_CPUS);
+	struct __top_cpus_runtime *cpus_runtime;
+};
+
 struct perf_kwork {
 	/*
 	 * metadata
@@ -225,6 +242,11 @@ struct perf_kwork {
 	u64 all_runtime;
 	u64 all_count;
 	u64 nr_skipped_events[KWORK_TRACE_MAX + 1];
+
+	/*
+	 * perf kwork top data
+	 */
+	struct kwork_top_stat top_stat;
 };
 
 struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
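[Editor's note] A design note on struct kwork_top_stat above: cpus_runtime is
allocated with MAX_NR_CPUS + 1 slots (see perf_kwork__top()), where index [cpu]
holds per-CPU sums and the extra slot [MAX_NR_CPUS] accumulates machine-wide
totals, so both the %CpuN bars and the "Total :" line come from the same single
accounting pass. A minimal sketch of that pattern (hypothetical names):

  #include <stdint.h>

  #define NCPUS 8

  struct runtime_slot { uint64_t total; };

  static struct runtime_slot slots[NCPUS + 1]; /* slots[NCPUS] = all CPUs */

  /* Update the per-CPU counter and the grand total together, so the
   * report code never needs a second summing pass. */
  static void account(int cpu, uint64_t delta)
  {
          slots[cpu].total   += delta; /* feeds the per-CPU %CpuN bar */
          slots[NCPUS].total += delta; /* feeds the "Total :" summary line */
  }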