1 // SPDX-License-Identifier: GPL-2.0
4 #include <linux/zalloc.h>
5 #include "block-info.h"
/*
 * Column descriptions for the block report.  Each slot, indexed by a
 * PERF_HPP_REPORT__BLOCK_* value, carries the header title that
 * init_block_header() copies into the matching struct block_fmt
 * (together with the column width, per the assignments there).
 */
static struct block_header_column {
} block_columns[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = {
	[PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT] = {
		.name = "Sampled Cycles%",
	[PERF_HPP_REPORT__BLOCK_LBR_CYCLES] = {
		.name = "Sampled Cycles",
	[PERF_HPP_REPORT__BLOCK_CYCLES_PCT] = {
		.name = "Avg Cycles%",
	[PERF_HPP_REPORT__BLOCK_AVG_CYCLES] = {
	[PERF_HPP_REPORT__BLOCK_RANGE] = {
		.name = "[Program Block Range]",
	[PERF_HPP_REPORT__BLOCK_DSO] = {
		.name = "Shared Object",
44 struct block_info *block_info__get(struct block_info *bi)
47 refcount_inc(&bi->refcnt);
51 void block_info__put(struct block_info *bi)
53 if (bi && refcount_dec_and_test(&bi->refcnt))
57 struct block_info *block_info__new(void)
59 struct block_info *bi = zalloc(sizeof(*bi));
62 refcount_set(&bi->refcnt, 1);
/*
 * Ordering callback shared by the block columns (wired up as fmt->cmp in
 * hpp_register()).  Compares two block hist entries by symbol and block
 * boundaries; note all differences are computed as (right - left).
 */
int64_t block_info__cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			struct hist_entry *left, struct hist_entry *right)
	struct block_info *bi_l = left->block_info;
	struct block_info *bi_r = right->block_info;

	/* Handle entries that lack a resolved symbol first. */
	if (!bi_l->sym || !bi_r->sym) {
		if (!bi_l->sym && !bi_r->sym)

	/* Same symbol: tie-break on the block start, then block end offset. */
	if (bi_l->sym == bi_r->sym) {
		if (bi_l->start == bi_r->start) {
			if (bi_l->end == bi_r->end)
			return (int64_t)(bi_r->end - bi_l->end);
		return (int64_t)(bi_r->start - bi_l->start);

	/* Different symbols: by name, then by symbol start/end address. */
	cmp = strcmp(bi_l->sym->name, bi_r->sym->name);

	if (bi_l->sym->start != bi_r->sym->start)
		return (int64_t)(bi_r->sym->start - bi_l->sym->start);

	return (int64_t)(bi_r->sym->end - bi_l->sym->end);
/*
 * Populate @bi from one slot of a symbol's cycles histogram.
 * @offset is the loop index from block_info__process_sym() — presumably
 * the block's end offset within the symbol (the bi->end assignment is
 * not visible in this chunk; confirm against the full file).
 */
static void init_block_info(struct block_info *bi, struct symbol *sym,
			    struct cyc_hist *ch, int offset,
	bi->start = ch->start;
	bi->cycles = ch->cycles;
	bi->cycles_aggr = ch->cycles_aggr;
	bi->num_aggr = ch->num_aggr;
	bi->total_cycles = total_cycles;

	/* Copy the fixed-size per-period spark values wholesale. */
	memcpy(bi->cycles_spark, ch->cycles_spark,
	       NUM_SPARKS * sizeof(u64));
/*
 * Walk the annotated cycles histogram of @he's symbol and add one block
 * hist entry per slot that has aggregated samples.  Per-block average
 * cycles are summed and, when @block_cycles_aggr is non-NULL, the sum is
 * accumulated into it for the caller.
 */
int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
			    u64 *block_cycles_aggr, u64 total_cycles)
	struct annotation *notes;
	/* NOTE(review): static and re-zeroed below — not reentrant. */
	static struct addr_location al;

	/* Nothing to do without a resolved map and symbol. */
	if (!he->ms.map || !he->ms.sym)

	memset(&al, 0, sizeof(al));

	/* Bail out when no cycles histogram was collected for the symbol. */
	notes = symbol__annotation(he->ms.sym);
	if (!notes || !notes->src || !notes->src->cycles_hist)
	ch = notes->src->cycles_hist;

	/* One histogram slot per instruction offset in the symbol. */
	for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
		if (ch[i].num_aggr) {
			struct block_info *bi;
			struct hist_entry *he_block;

			bi = block_info__new();
			init_block_info(bi, he->ms.sym, &ch[i], i,
			cycles += bi->cycles_aggr / bi->num_aggr;

			he_block = hists__add_entry_block(&bh->block_hists,

	if (block_cycles_aggr)
		*block_cycles_aggr += cycles;
165 static int block_column_header(struct perf_hpp_fmt *fmt,
166 struct perf_hpp *hpp,
167 struct hists *hists __maybe_unused,
168 int line __maybe_unused,
169 int *span __maybe_unused)
171 struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
173 return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
177 static int block_column_width(struct perf_hpp_fmt *fmt,
178 struct perf_hpp *hpp __maybe_unused,
179 struct hists *hists __maybe_unused)
181 struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
183 return block_fmt->width;
186 static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt,
187 struct perf_hpp *hpp,
188 struct hist_entry *he)
190 struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
191 struct block_info *bi = he->block_info;
195 if (block_fmt->total_cycles)
196 ratio = (double)bi->cycles / (double)block_fmt->total_cycles;
198 sprintf(buf, "%.2f%%", 100.0 * ratio);
200 return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
/*
 * Sort callback for the "Sampled Cycles%" column.  Scales the ratio by
 * 100000 (percentage x1000) before truncating to int64_t so that sub-
 * 0.001% differences still influence the ordering.
 */
static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt,
					   struct hist_entry *left,
					   struct hist_entry *right)
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi_l = left->block_info;
	struct block_info *bi_r = right->block_info;

	/* total_cycles == 0 would divide by zero; skip the comparison then. */
	if (block_fmt->total_cycles) {
		l = ((double)bi_l->cycles /
		     (double)block_fmt->total_cycles) * 100000.0;
		r = ((double)bi_r->cycles /
		     (double)block_fmt->total_cycles) * 100000.0;
		return (int64_t)l - (int64_t)r;
223 static void cycles_string(u64 cycles, char *buf, int size)
225 if (cycles >= 1000000)
226 scnprintf(buf, size, "%.1fM", (double)cycles / 1000000.0);
227 else if (cycles >= 1000)
228 scnprintf(buf, size, "%.1fK", (double)cycles / 1000.0);
230 scnprintf(buf, size, "%1d", cycles);
233 static int block_cycles_lbr_entry(struct perf_hpp_fmt *fmt,
234 struct perf_hpp *hpp, struct hist_entry *he)
236 struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
237 struct block_info *bi = he->block_info;
240 cycles_string(bi->cycles_aggr, cycles_buf, sizeof(cycles_buf));
242 return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
246 static int block_cycles_pct_entry(struct perf_hpp_fmt *fmt,
247 struct perf_hpp *hpp, struct hist_entry *he)
249 struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
250 struct block_info *bi = he->block_info;
255 if (block_fmt->block_cycles && bi->num_aggr) {
256 avg = bi->cycles_aggr / bi->num_aggr;
257 ratio = (double)avg / (double)block_fmt->block_cycles;
260 sprintf(buf, "%.2f%%", 100.0 * ratio);
262 return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
265 static int block_avg_cycles_entry(struct perf_hpp_fmt *fmt,
266 struct perf_hpp *hpp,
267 struct hist_entry *he)
269 struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
270 struct block_info *bi = he->block_info;
273 cycles_string(bi->cycles_aggr / bi->num_aggr, cycles_buf,
276 return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
/*
 * "[Program Block Range]" column: resolve the block's start/end offsets
 * to source lines; the second scnprintf() falls back to a hex range.
 */
static int block_range_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	char *start_line, *end_line;

	/* Suppress addr2line warnings while resolving the two addresses. */
	symbol_conf.disable_add2line_warn = true;

	start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
	end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,

	/*
	 * NOTE(review): the SRCLINE_UNKNOWN test is pointer identity —
	 * presumably map__srcline() returns that exact sentinel; confirm.
	 */
	if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
		scnprintf(buf, sizeof(buf), "[%s -> %s]",
			  start_line, end_line);
		scnprintf(buf, sizeof(buf), "[%7lx -> %7lx]",

	/* srcline results are owned here; release both before returning. */
	free_srcline(start_line);
	free_srcline(end_line);

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
/*
 * "Shared Object" column: short name of the DSO behind the entry's map,
 * with a fallback label when map or dso is missing.
 */
static int block_dso_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			   struct hist_entry *he)
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct map *map = he->ms.map;

	if (map && map->dso) {
		return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
				 map->dso->short_name);
	}

	/* No DSO resolved: print the fallback label instead. */
	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
325 static void init_block_header(struct block_fmt *block_fmt)
327 struct perf_hpp_fmt *fmt = &block_fmt->fmt;
329 BUG_ON(block_fmt->idx >= PERF_HPP_REPORT__BLOCK_MAX_INDEX);
331 block_fmt->header = block_columns[block_fmt->idx].name;
332 block_fmt->width = block_columns[block_fmt->idx].width;
334 fmt->header = block_column_header;
335 fmt->width = block_column_width;
/*
 * Initialize one column's perf_hpp_fmt: record its index, pick the
 * per-column entry callback, then register it on @hpp_list via
 * init_block_header() and perf_hpp_list__column_register().
 */
static void hpp_register(struct block_fmt *block_fmt, int idx,
			 struct perf_hpp_list *hpp_list)
	struct perf_hpp_fmt *fmt = &block_fmt->fmt;

	block_fmt->idx = idx;
	INIT_LIST_HEAD(&fmt->list);
	INIT_LIST_HEAD(&fmt->sort_list);

	case PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT:
		fmt->entry = block_total_cycles_pct_entry;
		/* Only this column gets compare/sort callbacks here. */
		fmt->cmp = block_info__cmp;
		fmt->sort = block_total_cycles_pct_sort;
	case PERF_HPP_REPORT__BLOCK_LBR_CYCLES:
		fmt->entry = block_cycles_lbr_entry;
	case PERF_HPP_REPORT__BLOCK_CYCLES_PCT:
		fmt->entry = block_cycles_pct_entry;
	case PERF_HPP_REPORT__BLOCK_AVG_CYCLES:
		fmt->entry = block_avg_cycles_entry;
	case PERF_HPP_REPORT__BLOCK_RANGE:
		fmt->entry = block_range_entry;
	case PERF_HPP_REPORT__BLOCK_DSO:
		fmt->entry = block_dso_entry;

	init_block_header(block_fmt);
	perf_hpp_list__column_register(hpp_list, fmt);
376 static void register_block_columns(struct perf_hpp_list *hpp_list,
377 struct block_fmt *block_fmts)
379 for (int i = 0; i < PERF_HPP_REPORT__BLOCK_MAX_INDEX; i++)
380 hpp_register(&block_fmts[i], i, hpp_list);
/*
 * Set up the block hists: a private hpp list with a single header line,
 * all block columns registered, sorted by the "Sampled Cycles%" column.
 */
static void init_block_hist(struct block_hist *bh, struct block_fmt *block_fmts)
	__hists__init(&bh->block_hists, &bh->block_list);
	perf_hpp_list__init(&bh->block_list);
	bh->block_list.nr_header_lines = 1;

	register_block_columns(&bh->block_list, block_fmts);

	/* Default sort key: the total cycles percentage column. */
	perf_hpp_list__register_sort_field(&bh->block_list,
		&block_fmts[PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT].fmt);
/*
 * Build one block_report from @hists: initialize its block hists, fold
 * every hist entry's symbol blocks in (accumulating into
 * block_report->cycles), propagate the cycle totals to each column
 * format, then resort the output.
 */
static void process_block_report(struct hists *hists,
				 struct block_report *block_report,
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct block_hist *bh = &block_report->hist;
	struct hist_entry *he;

	init_block_hist(bh, block_report->fmts);

	/* Iterate all entries of @hists in rbtree order. */
	he = rb_entry(next, struct hist_entry, rb_node);
	block_info__process_sym(he, bh, &block_report->cycles,
	next = rb_next(&he->rb_node);

	/* Every column shares the same totals for percentage computation. */
	for (int i = 0; i < PERF_HPP_REPORT__BLOCK_MAX_INDEX; i++) {
		block_report->fmts[i].total_cycles = total_cycles;
		block_report->fmts[i].block_cycles = block_report->cycles;

	hists__output_resort(&bh->block_hists, NULL);
/*
 * Allocate one block_report per evsel in @evlist and fill each from the
 * evsel's hists.  Returns the calloc'ed array — the caller owns it.
 * Presumably NULL on allocation failure; the check is not visible here.
 */
struct block_report *block_info__create_report(struct evlist *evlist,
	struct block_report *block_reports;
	int nr_hists = evlist->core.nr_entries, i = 0;

	block_reports = calloc(nr_hists, sizeof(struct block_report));

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		process_block_report(hists, &block_reports[i], total_cycles);

	return block_reports;
/*
 * Render one block hist according to use_browser.  The branch shown
 * enables individual-block reporting, fprintf's the hists and then
 * frees the entries.
 */
int report__browse_block_hists(struct block_hist *bh, float min_percent,
			       struct evsel *evsel __maybe_unused)
	switch (use_browser) {
		symbol_conf.report_individual_block = true;
		hists__fprintf(&bh->block_hists, true, 0, 0, min_percent,
		hists__delete_entries(&bh->block_hists);
/*
 * Percentage of all sampled cycles attributed to this block entry.
 * The total_cycles check guards the division against zero.
 */
float block_info__total_cycles_percent(struct hist_entry *he)
	struct block_info *bi = he->block_info;

	if (bi->total_cycles)
		return bi->cycles * 100.0 / bi->total_cycles;