/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "../perf.h"
#include "session.h"
#include "machine.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "util.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
#define MAX_TIMESTAMP (~0ULL)
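/*
 * Editor's note (inferred from usage in intel_pt_flush() below): besides
 * being the largest possible timestamp, MAX_TIMESTAMP serves as the
 * "process everything" sentinel passed to intel_pt_process_queues() so
 * that every queued timestamp compares as earlier and all queues drain.
 */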
struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct perf_evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool synth_needs_swap;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
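/*
 * Editor's sketch of the sync_switch state machine, as implied by
 * intel_pt_sample() and intel_pt_sync_switch() below:
 *
 *   NOT_TRACING            --(trace restart, to_ip != 0)-->   UNKNOWN
 *   UNKNOWN / TRACING      --(sched_switch event arrives)-->  EXPECTING_SWITCH_IP
 *   EXPECTING_SWITCH_IP    --(branch to switch_ip decoded)--> TRACING
 *   TRACING                --(switch_ip seen before event)--> EXPECTING_SWITCH_EVENT
 *   EXPECTING_SWITCH_EVENT --(sched_switch event arrives)-->  TRACING
 *   any state              --(TIP.PGD, to_ip == 0)-->         NOT_TRACING
 */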
struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct intel_pt_decoder *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	char insn[INTEL_PT_INSN_BUF_SZ];
};
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08zx: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
			break;
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	return 0;
}
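/*
 * Editor's note: in snapshot mode, successive AUX buffers can contain
 * overlapping copies of the same trace data.  intel_pt_find_overlap()
 * (from the decoder library) returns the first byte of 'b' not already
 * covered by 'a'; use_data/use_size then expose only the non-overlapping
 * tail.  For example, if the last 4KB of 'a' reappear at the start of
 * 'b', decoding of 'b' begins 4KB in.
 */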
static void intel_pt_use_buffer_pid_tid(struct intel_pt_queue *ptq,
					struct auxtrace_queue *queue,
					struct auxtrace_buffer *buffer)
{
	if (queue->cpu == -1 && buffer->cpu != -1)
		ptq->cpu = buffer->cpu;

	ptq->pid = buffer->pid;
	ptq->tid = buffer->tid;

	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);

	thread__zput(ptq->thread);

	if (ptq->tid != -1) {
		if (ptq->pid != -1)
			ptq->thread = machine__findnew_thread(ptq->pt->machine,
							      ptq->pid,
							      ptq->tid);
		else
			ptq->thread = machine__find_thread(ptq->pt->machine, -1,
							   ptq->tid);
	}
}
/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	if (!buffer->data) {
		int fd = perf_data_file__fd(ptq->pt->session->file);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	/*
	 * If in snapshot mode and the buffer has no usable data, get next
	 * buffer and again check overlap against old_buffer.
	 */
	if (ptq->pt->snapshot_mode && !b->len)
		return intel_pt_get_trace(b, data);

	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
						      !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	if (ptq->use_buffer_pid_tid && (ptq->pid != buffer->pid ||
					ptq->tid != buffer->tid))
		intel_pt_use_buffer_pid_tid(ptq, queue, buffer);

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (!b->len)
		return intel_pt_get_trace(b, data);

	return 0;
}
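/*
 * Editor's summary of the callback contract above: intel_pt_get_trace()
 * is the decoder's data-supply callback.  Returning with b->len == 0
 * signals end of trace; b->consecutive tells the decoder whether packet
 * state may carry over from the previous buffer, and a new b->trace_nr
 * forces it to re-synchronize to the packet stream (e.g. after dropped
 * or snapshot data).
 */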
struct intel_pt_cache_entry {
	struct auxtrace_cache_entry	entry;
	u64				insn_cnt;
	u64				byte_cnt;
	enum intel_pt_insn_op		op;
	enum intel_pt_insn_branch	branch;
	int				length;
	int32_t				rel;
	char				insn[INTEL_PT_INSN_BUF_SZ];
};
static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}
static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}
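/*
 * Worked example (editor's addition, assuming the default divisor of 64
 * set above): a 16MiB DSO gives size = 256KiB = 2^18, so the cache gets
 * 32 - __builtin_clz(1 << 18) = 32 - 13 = 19 hash bits.  The two tests
 * above clamp the result to the range [10, 21] bits.
 */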
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}
static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	if (*ip >= ptq->pt->kernel_start)
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al);
		if (!al.map || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn,
				       INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip)
				goto out_no_cache;

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}
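/*
 * Editor's summary: intel_pt_walk_next_insn() is the decoder's instruction
 * walk callback.  Starting at *ip it decodes instructions from the DSO
 * file image until it reaches a branch, hits to_ip, or exceeds
 * max_insn_cnt.  Each run that ends in a branch is memoized per-DSO via
 * intel_pt_cache_add(), keyed by start offset, so a hot basic block is
 * decoded from raw bytes only once and thereafter served by the cache
 * lookup near the top of the function.
 */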
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter   = false;
	bool hit_tracestop = false;
	bool hit_filter    = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}
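/*
 * Editor's note on the return value: true means this TIP.PGD destination
 * should be treated as leaving the traced region, either because the
 * address hit a trace-stop region, or because address filters exist and
 * none matched.  How the decoder acts on that is up to the pgd_ip
 * callback's caller in the decoder library.
 */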
static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ip >= ptq->pt->kernel_start)
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
	if (!al.map || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}
static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & 1) && !(config & 0x2000))
			return true;
	}
	return false;
}

static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem  = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
		pt->tc.time_mult;
}
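/*
 * Worked example (editor's addition, with illustrative values): the naive
 * conversion ticks = (ns << time_shift) / time_mult can overflow 64 bits
 * for large ns, so ns is split into quotient and remainder of time_mult
 * first.  With time_mult = 1000000 and time_shift = 10, ns = 1e9 gives
 * quot = 1000, rem = 0, hence (1000 << 10) + 0 = 1024000 ticks, identical
 * to the naive result.
 */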
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		size_t sz = sizeof(struct ip_callchain);

		sz += pt->synth_opts.callchain_sz * sizeof(u64);
		ptq->chain = zalloc(sz);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += pt->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		ptq->last_branch = zalloc(sz);
		if (!ptq->last_branch)
			goto out_free;
		ptq->last_branch_rb = zalloc(sz);
		if (!ptq->last_branch_rb)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}
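/*
 * Editor's note: the params block above wires this queue's callbacks and
 * timing configuration into the decoder library.  Note the period
 * mapping: a nanoseconds period is converted to TSC ticks up front via
 * intel_pt_ns_to_ticks(), so the decoder itself only ever deals in
 * instruction counts or ticks.
 */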
static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
}
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		ptq->insn_len = 0;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}
}
static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		if (pt->sampling_mode) {
			if (pt->timeless_decoding)
				ptq->step_through_buffers = true;
			if (pt->timeless_decoding || !pt->have_sched_switch)
				ptq->use_buffer_pid_tid = true;
		}
	}

	if (!ptq->on_heap &&
	    (!pt->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}
static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}
static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
	struct branch_stack *bs_src = ptq->last_branch_rb;
	struct branch_stack *bs_dst = ptq->last_branch;
	size_t nr = 0;

	bs_dst->nr = bs_src->nr;

	if (!bs_src->nr)
		return;

	nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[ptq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * ptq->last_branch_pos);
	}
}
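/*
 * Editor's illustration: last_branch_rb is filled backwards from
 * last_branch_pos (see intel_pt_update_last_branch_rb() below), so the
 * newest entry sits at entries[last_branch_pos].  With last_branch_sz = 4,
 * last_branch_pos = 1 and a full ring:
 *
 *   src: [ b4, b1, b2, b3 ]   pos = 1   (b1 newest, b4 oldest)
 *
 * the two memcpys emit src[1..3] then src[0], i.e. b1 b2 b3 b4 - the
 * newest-first order expected for a PERF_SAMPLE_BRANCH_STACK payload.
 */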
static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
	ptq->last_branch_pos = 0;
	ptq->last_branch_rb->nr = 0;
}

static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct branch_stack *bs = ptq->last_branch_rb;
	struct branch_entry *be;

	if (!ptq->last_branch_pos)
		ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

	ptq->last_branch_pos -= 1;

	be              = &bs->entries[ptq->last_branch_pos];
	be->from        = state->from_ip;
	be->to          = state->to_ip;
	be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
	be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
	/* No support for mispredict */
	be->flags.mispred = ptq->pt->mispred_all;

	if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
		bs->nr += 1;
}
static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type,
				 bool swapped)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample, swapped);
}
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct dummy_branch_stack {
		u64			nr;
		struct branch_entry	entries;
	} dummy_bs;

	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (pt->synth_opts.initial_skip &&
	    pt->num_events++ < pt->synth_opts.initial_skip)
		return 0;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;
	sample.period = 1;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;
	memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);

	/*
	 * perf report cannot handle events without a branch stack when using
	 * SORT_MODE__BRANCH so make a dummy one.
	 */
	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->branches_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver branch event, error %d\n",
		       ret);

	return ret;
}
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (pt->synth_opts.initial_skip &&
	    pt->num_events++ < pt->synth_opts.initial_skip)
		return 0;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;
	memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip);
		sample.callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample.branch_stack = ptq->last_branch;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->instructions_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
		       ret);

	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (pt->synth_opts.initial_skip &&
	    pt->num_events++ < pt->synth_opts.initial_skip)
		return 0;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;
	sample.period = 1;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;
	memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip);
		sample.callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample.branch_stack = ptq->last_branch;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->transactions_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
		       ret);

	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}
static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}
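/*
 * Editor's note: only a plain, synchronous taken branch to switch_ip is
 * accepted.  Conditional, asynchronous, interrupt and tx-abort branches
 * are excluded so that, for example, an interrupt that happens to land on
 * the switch address is not mistaken for the context switch path.
 */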
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!ptq->have_sample)
		return 0;

	ptq->have_sample = false;

	if (pt->sample_instructions &&
	    (state->type & INTEL_PT_INSTRUCTION)) {
		err = intel_pt_synth_instruction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_transactions &&
	    (state->type & INTEL_PT_TRANSACTION)) {
		err = intel_pt_synth_transaction_sample(ptq);
		if (err)
			return err;
	}

	if (!(state->type & INTEL_PT_BRANCH))
		return 0;

	if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
		thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
				    state->to_ip, ptq->insn_len,
				    state->trace_nr);
	else
		thread_stack__set_trace_nr(ptq->thread, state->trace_nr);

	if (pt->sample_branches) {
		err = intel_pt_synth_branch_sample(ptq);
		if (err)
			return err;
	}

	if (pt->synth_opts.last_branch)
		intel_pt_update_last_branch_rb(ptq);

	if (!pt->sync_switch)
		return 0;

	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
		switch (ptq->switch_state) {
		case INTEL_PT_SS_UNKNOWN:
		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
			err = intel_pt_next_tid(pt, ptq);
			if (err)
				return err;
			ptq->switch_state = INTEL_PT_SS_TRACING;
			break;
		default:
			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
			return 1;
		}
	} else if (!state->to_ip) {
		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
		   state->to_ip == pt->ptss_ip &&
		   (ptq->flags & PERF_IP_FLAG_CALL)) {
		ptq->switch_state = INTEL_PT_SS_TRACING;
	}

	return 0;
}
static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
	struct machine *machine = pt->machine;
	struct map *map;
	struct symbol *sym, *start;
	u64 ip, switch_ip = 0;
	const char *ptss;

	if (ptss_ip)
		*ptss_ip = 0;

	map = machine__kernel_map(machine);
	if (!map)
		return 0;

	if (map__load(map))
		return 0;

	start = dso__first_symbol(map->dso, MAP__FUNCTION);

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding == STB_GLOBAL &&
		    !strcmp(sym->name, "__switch_to")) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				switch_ip = ip;
				break;
			}
		}
	}

	if (!switch_ip || !ptss_ip)
		return 0;

	if (pt->have_sched_switch == 1)
		ptss = "perf_trace_sched_switch";
	else
		ptss = "__perf_event_task_sched_out";

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (!strcmp(sym->name, ptss)) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				*ptss_ip = ip;
				break;
			}
		}
	}

	return switch_ip;
}
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!pt->kernel_start) {
		pt->kernel_start = machine__kernel_start(pt->machine);
		if (pt->per_cpu_mmaps &&
		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
		    !pt->sampling_mode) {
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
			if (pt->switch_ip) {
				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
					     pt->switch_ip, pt->ptss_ip);
				pt->sync_switch = true;
			}
		}
	}

	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
	while (1) {
		err = intel_pt_sample(ptq);
		if (err)
			return err;

		state = intel_pt_decode(ptq->decoder);
		if (state->err) {
			if (state->err == INTEL_PT_ERR_NODATA)
				return 1;
			if (pt->sync_switch &&
			    state->from_ip >= pt->kernel_start) {
				pt->sync_switch = false;
				intel_pt_next_tid(pt, ptq);
			}
			if (pt->synth_opts.errors) {
				err = intel_pt_synth_error(pt, state->err,
							   ptq->cpu, ptq->pid,
							   ptq->tid,
							   state->from_ip);
				if (err)
					return err;
			}
			continue;
		}

		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);

		/* Use estimated TSC upon return to user space */
		if (pt->est_tsc &&
		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
		    state->to_ip && state->to_ip < pt->kernel_start) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		/* Use estimated TSC in unknown switch state */
		} else if (pt->sync_switch &&
			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
			   ptq->next_tid == -1) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		} else if (state->timestamp > ptq->timestamp) {
			ptq->timestamp = state->timestamp;
		}

		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
			*timestamp = ptq->timestamp;
			return 0;
		}
	}
}
static inline int intel_pt_update_queues(struct intel_pt *pt)
{
	if (pt->queues.new_data) {
		pt->queues.new_data = false;
		return intel_pt_setup_queues(pt);
	}
	return 0;
}
static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);

		if (ret < 0) {
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			ptq->on_heap = false;
		}
	}

	return 0;
}
static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}

static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0);
}
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}
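/*
 * Editor's note on the lookup heuristic: queues are normally created one
 * per CPU in order, so queue_array[cpu] is the expected slot and is
 * checked first.  Only if that misses does the function fall back to
 * scanning downwards from that slot and then forward over the remainder
 * of the array.
 */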
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		ptq->next_tid = -1;
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		ptq->next_tid = tid;
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	return 1;
}
static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct perf_evsel *evsel;
	pid_t tid;
	int cpu, ret;

	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = perf_evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
		     &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return 0;
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1) {
		pr_err("context_switch event has no tid\n");
		return -EINVAL;
	}

	intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
		     &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}
static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
		     perf_event__name(event->header.type), event->header.type,
		     sample->cpu, sample->time, timestamp);

	return err;
}
static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}
static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}

static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__put(pt->unknown_thread);
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
}
static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (pt->sampling_mode)
		return 0;

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data_file__fd(session->file);
		int err;

		if (perf_data_file__is_pipe(session->file)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}
struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}

static int intel_pt_synth_event(struct perf_session *session,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	return perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					   &id, intel_pt_event_synth);
}
static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == pt->pmu_type && evsel->ids) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		pt->instructions_sample_period = attr.sample_period;
		if (pt->synth_opts.callchain)
			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
		if (pt->synth_opts.last_branch)
			attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'instructions' event type\n",
			       __func__);
			return err;
		}
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = 1;
		if (pt->synth_opts.callchain)
			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
		if (pt->synth_opts.last_branch)
			attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'transactions' event type\n",
			       __func__);
			return err;
		}
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		id += 1;
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->id && evsel->id[0] == pt->transactions_id) {
				if (evsel->name)
					zfree(&evsel->name);
				evsel->name = strdup("transactions");
				break;
			}
		}
	}

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
		attr.sample_type &= ~(u64)PERF_SAMPLE_BRANCH_STACK;
		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'branches' event type\n",
			       __func__);
			return err;
		}
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
	}

	pt->synth_needs_swap = evsel->needs_swap;

	return 0;
}
static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel) {
		const char *name = perf_evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}

static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.context_switch)
			return true;
	}

	return false;
}

static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
	struct intel_pt *pt = data;

	if (!strcmp(var, "intel-pt.mispred-all"))
		pt->mispred_all = perf_config_bool(var, value);

	return 0;
}
static const char * const intel_pt_info_fmts[] = {
	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
	[INTEL_PT_FILTER_STR_LEN]	= "  Filter string len.  %"PRIu64"\n",
};
static void intel_pt_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++) {
		const char *fmt = intel_pt_info_fmts[i];

		/* Some slots (e.g. INTEL_PT_MTC_FREQ_BITS) have no format */
		if (fmt)
			fprintf(stdout, fmt, arr[i]);
	}
}

static void intel_pt_print_info_str(const char *name, const char *str)
{
	if (!dump_trace)
		return;

	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
}

static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos)
{
	return auxtrace_info->header.size >=
		sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1));
}
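/*
 * Editor's note: intel_pt_has() is the backward-compatibility test for
 * perf.data files recorded by older tools.  A later-added field such as
 * INTEL_PT_FILTER_STR_LEN is only read when the recorded auxtrace_info
 * payload is big enough to contain priv[pos], so newer perf can still
 * read older files.
 */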
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	void *info_end;
	u64 *info;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	addr_filters__init(&pt->filts);

	err = perf_config(intel_pt_perf_config, pt);
	if (err)
		goto err_free;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = false;
	pt->est_tsc = !pt->timeless_decoding;

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a
	 * list, initialize its list node so that at thread__put() the
	 * current thread lifetime assumption is kept and we don't segfault
	 * at list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts);
		if (use_browser != -1) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
		}
		if (session->itrace_synth_opts)
			pt->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
	}

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
		}
	}

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
	return err;
}