1 // SPDX-License-Identifier: GPL-2.0-only
/*
3 * db-export.c: Support for exporting data suitable for import to a database
4 * Copyright (c) 2014, Intel Corporation.
*/
18 #include "thread-stack.h"
19 #include "callchain.h"
20 #include "call-path.h"
21 #include "db-export.h"
/*
 * A comm whose database export has been postponed until its thread's
 * exec comm is known; queued on db_export::deferred (see
 * db_export__defer_comm() / db_export__deferred()).
 * NOTE(review): this view of the file is elided -- the struct's comm
 * member (referenced as de->comm below) and closing brace are not
 * visible here.
 */
23 struct deferred_export {
24 struct list_head node;
/*
 * db_export__deferred() - flush the queue of deferred comm exports.
 * Drains dbe->deferred, handing each queued comm to the user-supplied
 * export_comm() callback.
 * NOTE(review): elided view -- list unlinking, freeing of each
 * deferred_export and the error/return paths are not visible here.
 */
28 static int db_export__deferred(struct db_export *dbe)
30 struct deferred_export *de;
33 while (!list_empty(&dbe->deferred)) {
/* Pop the oldest entry from the head of the deferred list. */
34 de = list_entry(dbe->deferred.next, struct deferred_export,
36 err = dbe->export_comm(dbe, de->comm);
/*
 * db_export__free_deferred() - discard the deferred-comm queue without
 * exporting anything (cleanup path, cf. db_export__deferred() which
 * exports).
 * NOTE(review): elided view -- the list_del()/free() of each entry is
 * not visible here.
 */
46 static void db_export__free_deferred(struct db_export *dbe)
48 struct deferred_export *de;
50 while (!list_empty(&dbe->deferred)) {
51 de = list_entry(dbe->deferred.next, struct deferred_export,
/*
 * db_export__defer_comm() - queue @comm for later export.
 * Allocates a zeroed deferred_export and appends it to the tail of
 * dbe->deferred so exports happen in FIFO order when flushed.
 * NOTE(review): elided view -- the allocation-failure check and the
 * assignment of de->comm are not visible here.
 */
58 static int db_export__defer_comm(struct db_export *dbe, struct comm *comm)
60 struct deferred_export *de;
62 de = zalloc(sizeof(struct deferred_export));
67 list_add_tail(&de->node, &dbe->deferred);
/*
 * db_export__init() - initialize a db_export: zero all state (counters,
 * callbacks) and set up the empty deferred-comm list.
 */
72 int db_export__init(struct db_export *dbe)
74 memset(dbe, 0, sizeof(struct db_export))
75 INIT_LIST_HEAD(&dbe->deferred);
/*
 * db_export__flush() - export anything still pending, i.e. flush the
 * deferred comms.  Returns db_export__deferred()'s error code.
 */
79 int db_export__flush(struct db_export *dbe)
81 return db_export__deferred(dbe);
/*
 * db_export__exit() - tear down a db_export: drop any still-queued
 * deferred comms without exporting them, and free the call/return
 * processor (if any -- call_return_processor__free() presumably
 * tolerates NULL; confirm in call-path/thread-stack code).
 */
84 void db_export__exit(struct db_export *dbe)
86 db_export__free_deferred(dbe);
87 call_return_processor__free(dbe->crp);
/*
 * db_export__evsel() - assign @evsel the next evsel database id and
 * invoke the optional export_evsel() callback.
 * NOTE(review): elided view -- the usual "already exported" early
 * return (if (evsel->db_id) ...) is not visible here.
 */
91 int db_export__evsel(struct db_export *dbe, struct perf_evsel *evsel)
96 evsel->db_id = ++dbe->evsel_last_db_id;
98 if (dbe->export_evsel)
99 return dbe->export_evsel(dbe, evsel);
/*
 * db_export__machine() - assign @machine the next machine database id
 * and invoke the optional export_machine() callback.  Mirrors
 * db_export__evsel().
 */
104 int db_export__machine(struct db_export *dbe, struct machine *machine)
109 machine->db_id = ++dbe->machine_last_db_id;
111 if (dbe->export_machine)
112 return dbe->export_machine(dbe, machine);
/*
 * db_export__thread() - export @thread, first ensuring its main thread
 * (thread-group leader) has been exported so its db id can be recorded
 * alongside.  Also links @comm to @thread via db_export__comm_thread().
 * NOTE(review): elided view -- early returns, error handling between
 * the visible statements, and the final return are not shown.
 */
117 int db_export__thread(struct db_export *dbe, struct thread *thread,
118 struct machine *machine, struct comm *comm)
120 struct thread *main_thread;
/* 0 means "no main thread id" when pid_ is unknown (-1). */
121 u64 main_thread_db_id = 0;
122 thread->db_id = ++dbe->thread_last_db_id;
129 if (thread->pid_ != -1) {
/* The group leader has pid_ == tid; then @thread is its own main thread. */
130 if (thread->pid_ == thread->tid) {
131 main_thread = thread;
/* Otherwise look up (or create) the leader; findnew takes a reference. */
133 main_thread = machine__findnew_thread(machine,
/* Recursively export the main thread before recording its db id. */
138 err = db_export__thread(dbe, main_thread, machine,
143 err = db_export__comm_thread(dbe, comm, thread);
148 main_thread_db_id = main_thread->db_id;
/* Balance the reference taken by machine__findnew_thread() above. */
149 if (main_thread != thread)
150 thread__put(main_thread);
153 if (dbe->export_thread)
154 return dbe->export_thread(dbe, thread, main_thread_db_id,
/* NOTE(review): presumably an error-path put -- context elided. */
160 thread__put(main_thread);
/*
 * db_export__comm() - assign @comm the next comm database id and export
 * it.  If @main_thread has not yet set its comm (i.e. exec has not
 * happened), the export is deferred until flush time; otherwise the
 * export_comm() callback runs immediately.  Finally links @comm to
 * @main_thread.
 * NOTE(review): elided view -- the "already exported" check and error
 * handling around err are not visible here.
 */
164 int db_export__comm(struct db_export *dbe, struct comm *comm,
165 struct thread *main_thread)
172 comm->db_id = ++dbe->comm_last_db_id;
174 if (dbe->export_comm) {
175 if (main_thread->comm_set)
176 err = dbe->export_comm(dbe, comm);
178 err = db_export__defer_comm(dbe, comm);
183 return db_export__comm_thread(dbe, comm, main_thread);
/*
 * db_export__comm_thread() - export the (comm, thread) association
 * row: allocate the next comm_thread database id and invoke the
 * optional export_comm_thread() callback.
 */
186 int db_export__comm_thread(struct db_export *dbe, struct comm *comm,
187 struct thread *thread)
191 db_id = ++dbe->comm_thread_last_db_id;
193 if (dbe->export_comm_thread)
194 return dbe->export_comm_thread(dbe, db_id, comm, thread);
/*
 * db_export__dso() - assign @dso the next dso database id and invoke
 * the export_dso() callback.
 * NOTE(review): elided view -- the "already exported" check and the
 * if (dbe->export_dso) guard preceding the call are not visible here.
 */
199 int db_export__dso(struct db_export *dbe, struct dso *dso,
200 struct machine *machine)
205 dso->db_id = ++dbe->dso_last_db_id;
208 return dbe->export_dso(dbe, dso, machine);
/*
 * db_export__symbol() - export @sym.  Symbols carry their db id in the
 * symbol private area (symbol__priv()), not in the struct itself; a new
 * id is assigned and the optional export_symbol() callback is invoked.
 */
213 int db_export__symbol(struct db_export *dbe, struct symbol *sym,
216 u64 *sym_db_id = symbol__priv(sym);
221 *sym_db_id = ++dbe->symbol_last_db_id;
223 if (dbe->export_symbol)
224 return dbe->export_symbol(dbe, sym, dso);
/*
 * db_ids_from_al() - resolve the dso/symbol database ids for an
 * addr_location, exporting them first if needed, and compute the
 * symbol-relative offset of the address.  Outputs are written through
 * @dso_db_id, @sym_db_id and @offset.
 * NOTE(review): elided view -- the null checks on al->map / al->sym and
 * the error/return paths are not visible here.
 */
229 static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
230 u64 *dso_db_id, u64 *sym_db_id, u64 *offset)
235 struct dso *dso = al->map->dso;
237 err = db_export__dso(dbe, dso, al->machine);
240 *dso_db_id = dso->db_id;
/* No symbol resolved: synthesize a zero-length "unknown" symbol ... */
243 al->sym = symbol__new(al->addr, 0, 0, 0, "unknown");
/* ... and insert it into the dso so later lookups find the same one. */
245 dso__insert_symbol(dso, al->sym);
249 u64 *db_id = symbol__priv(al->sym);
251 err = db_export__symbol(dbe, al->sym, dso);
255 *offset = al->addr - al->sym->start;
/*
 * call_path_from_sample() - build (or find) the call path for a
 * sample's callchain in dbe->cpr's call path tree, exporting the
 * dso/symbol of every node along the way.  Returns the leaf call_path,
 * or (per the empty-callchain check at the end) bails when nothing was
 * resolved.
 * NOTE(review): elided view -- error handling after
 * thread__resolve_callchain(), the loop construct around the cursor
 * walk, and the final returns are not visible here.
 */
262 static struct call_path *call_path_from_sample(struct db_export *dbe,
263 struct machine *machine,
264 struct thread *thread,
265 struct perf_sample *sample,
266 struct perf_evsel *evsel)
268 u64 kernel_start = machine__kernel_start(machine);
/* Walk starts at the tree root; descends one level per callchain node. */
269 struct call_path *current = &dbe->cpr->call_path;
270 enum chain_order saved_order = callchain_param.order;
273 if (!symbol_conf.use_callchain || !sample->callchain)
277 * Since the call path tree must be built starting with the root, we
278 * must use ORDER_CALLER for call chain resolution, in order to process
279 * the callchain starting with the root node and ending with the leaf.
281 callchain_param.order = ORDER_CALLER;
282 err = thread__resolve_callchain(thread, &callchain_cursor, evsel,
283 sample, NULL, NULL, PERF_MAX_STACK_DEPTH);
/* Restore the user's configured order before using the cursor. */
285 callchain_param.order = saved_order;
288 callchain_cursor_commit(&callchain_cursor);
291 struct callchain_cursor_node *node;
292 struct addr_location al;
293 u64 dso_db_id = 0, sym_db_id = 0, offset = 0;
295 memset(&al, 0, sizeof(al));
297 node = callchain_cursor_current(&callchain_cursor);
301 * Handle export of symbol and dso for this node by
302 * constructing an addr_location struct and then passing it to
303 * db_ids_from_al() to perform the export.
307 al.machine = machine;
/* Node gave a map but no symbol: resolve it from the dso directly. */
310 if (al.map && !al.sym)
311 al.sym = dso__find_symbol(al.map->dso, al.addr);
/* Best effort: the return value is deliberately ignored here. */
313 db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset);
315 /* add node to the call path tree if it doesn't exist */
316 current = call_path__findnew(dbe->cpr, current,
320 callchain_cursor_advance(&callchain_cursor);
323 /* Reset the callchain order to its prior value. */
324 callchain_param.order = saved_order;
326 if (current == &dbe->cpr->call_path) {
327 /* Bail because the callchain was empty. */
/*
 * db_export__branch_type() - export one branch-type (flags, name) pair
 * via the optional export_branch_type() callback.  Unlike the other
 * exporters, no db id counter is involved -- @branch_type itself is the
 * key.
 */
334 int db_export__branch_type(struct db_export *dbe, u32 branch_type,
337 if (dbe->export_branch_type)
338 return dbe->export_branch_type(dbe, branch_type, name);
/*
 * db_export__sample() - export one sample and everything it references:
 * evsel, machine, thread (+ main thread and exec comm), dso/symbol of
 * both the sample ip and (when sampled) the data address, the call path
 * from the callchain, and finally the sample row itself.  Also feeds
 * the thread stack for call/return processing.
 * NOTE(review): elided view -- the es initializer contents, the err
 * checks between steps, and the final return are not visible here.
 */
343 int db_export__sample(struct db_export *dbe, union perf_event *event,
344 struct perf_sample *sample, struct perf_evsel *evsel,
345 struct addr_location *al)
347 struct thread* thread = al->thread;
348 struct export_sample es = {
354 struct thread *main_thread;
355 struct comm *comm = NULL;
/* Dependencies first: referenced rows must exist before the sample row. */
358 err = db_export__evsel(dbe, evsel);
362 err = db_export__machine(dbe, al->machine);
/* thread__main_thread() returns a reference; put at the end (below). */
366 main_thread = thread__main_thread(al->machine, thread);
368 comm = machine__thread_exec_comm(al->machine, main_thread);
370 err = db_export__thread(dbe, thread, al->machine, comm);
375 err = db_export__comm(dbe, comm, main_thread);
378 es.comm_db_id = comm->db_id;
381 es.db_id = ++dbe->sample_last_db_id;
/* dso/symbol ids for the sample ip. */
383 err = db_ids_from_al(dbe, al, &es.dso_db_id, &es.sym_db_id, &es.offset);
388 struct call_path *cp = call_path_from_sample(dbe, al->machine,
392 db_export__call_path(dbe, cp);
393 es.call_path_id = cp->db_id;
/* When the sample carries a data address, resolve and export it too. */
397 if ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
398 sample_addr_correlates_sym(&evsel->attr)) {
399 struct addr_location addr_al;
401 thread__resolve(thread, &addr_al, sample);
402 err = db_ids_from_al(dbe, &addr_al, &es.addr_dso_db_id,
403 &es.addr_sym_db_id, &es.addr_offset);
/* Drive call/return tracking off this sample. */
407 err = thread_stack__process(thread, comm, sample, al,
415 if (dbe->export_sample)
416 err = dbe->export_sample(dbe, &es);
/* Balance the reference from thread__main_thread() above. */
419 thread__put(main_thread);
/*
 * Table mapping PERF_IP_FLAG_* combinations to human-readable branch
 * type names.  Iterated by db_export__branch_types(), which relies on a
 * NULL .name entry as the terminator.
 * NOTE(review): elided view -- the array declaration line, the "system
 * call" name string, and the terminating {0, NULL} entry are not
 * visible here.
 */
428 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
429 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
430 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "conditional jump"},
431 {PERF_IP_FLAG_BRANCH, "unconditional jump"},
432 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT,
433 "software interrupt"},
434 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT,
435 "return from interrupt"},
436 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET,
438 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET,
439 "return from system call"},
440 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "asynchronous branch"},
441 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
442 PERF_IP_FLAG_INTERRUPT, "hardware interrupt"},
443 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "transaction abort"},
444 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "trace begin"},
445 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "trace end"},
/*
 * db_export__branch_types() - export every entry of the branch_types
 * table, then synthesize and export "trace begin / X" and "X / trace
 * end" combined variants for each base type.  The plain unconditional
 * jump and the entries already carrying trace begin/end flags are
 * skipped in the second pass.
 * NOTE(review): elided view -- err checks inside the loops and the
 * final return are not visible here.
 */
449 int db_export__branch_types(struct db_export *dbe)
/* First pass: the table entries as-is (NULL name terminates). */
453 for (i = 0; branch_types[i].name ; i++) {
454 err = db_export__branch_type(dbe, branch_types[i].branch_type,
455 branch_types[i].name);
460 /* Add trace begin / end variants */
461 for (i = 0; branch_types[i].name ; i++) {
462 const char *name = branch_types[i].name;
463 u32 type = branch_types[i].branch_type;
/* Skip bare branches and types that already are trace begin/end. */
466 if (type == PERF_IP_FLAG_BRANCH ||
467 (type & (PERF_IP_FLAG_TRACE_BEGIN | PERF_IP_FLAG_TRACE_END)))
470 snprintf(buf, sizeof(buf), "trace begin / %s", name);
471 err = db_export__branch_type(dbe, type | PERF_IP_FLAG_TRACE_BEGIN, buf);
475 snprintf(buf, sizeof(buf), "%s / trace end", name);
476 err = db_export__branch_type(dbe, type | PERF_IP_FLAG_TRACE_END, buf);
/*
 * db_export__call_path() - export @cp, recursing to export its parent
 * first so parent rows (and their db ids) exist before children
 * reference them.  Assigns the next call_path database id and invokes
 * the optional export_call_path() callback.
 * NOTE(review): elided view -- the "already exported" / root checks are
 * not visible here.
 */
484 int db_export__call_path(struct db_export *dbe, struct call_path *cp)
/* Parents before children: depth-first up the tree. */
492 err = db_export__call_path(dbe, cp->parent);
497 cp->db_id = ++dbe->call_path_last_db_id;
499 if (dbe->export_call_path)
500 return dbe->export_call_path(dbe, cp);
/*
 * db_export__call_return() - export one call/return pair: first ensure
 * its call path is exported, then assign the next call_return database
 * id and invoke the optional export_call_return() callback.
 * NOTE(review): elided view -- the guards around the two id
 * assignments are not visible.  Both cr->db_id and *parent_db_id draw
 * from the same call_return_last_db_id counter; presumably the parent
 * id pre-reserves a row for a parent call_return exported later --
 * confirm against thread-stack.c's use of parent_db_id.
 */
505 int db_export__call_return(struct db_export *dbe, struct call_return *cr,
510 err = db_export__call_path(dbe, cr->cp);
515 cr->db_id = ++dbe->call_return_last_db_id;
519 *parent_db_id = ++dbe->call_return_last_db_id;
520 cr->parent_db_id = *parent_db_id;
523 if (dbe->export_call_return)
524 return dbe->export_call_return(dbe, cr);