// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */
#include <errno.h>
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <cpuid.h>

#include "../../perf.h"
#include "../../util/session.h"
#include "../../util/event.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/cpumap.h"
#include <subcmd/parse-options.h>
#include "../../util/parse-events.h"
#include "../../util/pmu.h"
#include "../../util/debug.h"
#include "../../util/auxtrace.h"
#include "../../util/tsc.h"
#include "../../util/intel-pt.h"
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

#define INTEL_PT_PSB_PERIOD_NEAR	256
struct intel_pt_snapshot_ref {
	void	*ref_buf;
	size_t	ref_offset;
	bool	wrapped;
};
struct intel_pt_recording {
	struct auxtrace_record		itr;
	struct perf_pmu			*intel_pt_pmu;
	int				have_sched_switch;
	struct evlist			*evlist;
	bool				snapshot_mode;
	bool				snapshot_init_done;
	size_t				snapshot_size;
	size_t				snapshot_ref_buf_size;
	int				snapshot_ref_cnt;
	struct intel_pt_snapshot_ref	*snapshot_refs;
	size_t				priv_size;
};
static int intel_pt_parse_terms_with_default(struct list_head *formats,
					     const char *str,
					     u64 *config)
{
	struct list_head *terms;
	struct perf_event_attr attr = { .size = 0, };
	int err;

	terms = malloc(sizeof(struct list_head));
	if (!terms)
		return -ENOMEM;

	INIT_LIST_HEAD(terms);

	err = parse_events_terms(terms, str);
	if (err)
		goto out_free;

	attr.config = *config;
	err = perf_pmu__config_terms(formats, &attr, terms, true, NULL);
	if (err)
		goto out_free;

	*config = attr.config;
out_free:
	parse_events_terms__delete(terms);
	return err;
}
static int intel_pt_parse_terms(struct list_head *formats, const char *str,
				u64 *config)
{
	*config = 0;
	return intel_pt_parse_terms_with_default(formats, str, config);
}
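/*
 * Illustrative use: intel_pt_parse_terms(&pmu->format, "tsc", &tsc_bit)
 * leaves in tsc_bit the config bit corresponding to the PMU's "tsc"
 * format term; this is how the *_bit values in intel_pt_info_fill()
 * below are obtained.
 */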
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
	const u64 top_bit = 1ULL << 63;
	u64 res = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & top_bit) {
			res <<= 1;
			if (bits & top_bit)
				res |= 1;
		}
		mask <<= 1;
		bits <<= 1;
	}

	return res;
}
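/*
 * Worked example: with mask 0xf00 (a 4-bit field at bits 8-11) and
 * bits 0x400, the masked bits are compressed down to give 4, i.e. the
 * value of that bit-field within the raw config.
 */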
static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
				struct evlist *evlist, u64 *res)
{
	struct evsel *evsel;
	u64 mask;

	*res = 0;

	mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
	if (!mask)
		return -EINVAL;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			*res = intel_pt_masked_bits(mask, evsel->core.attr.config);
			return 0;
		}
	}

	return -EINVAL;
}
static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
				  struct evlist *evlist)
{
	u64 val;
	int err, topa_multiple_entries;
	size_t psb_period;

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
				"%d", &topa_multiple_entries) != 1)
		topa_multiple_entries = 0;

	/*
	 * Use caps/topa_multiple_entries to indicate early hardware that had
	 * extra frequent PSBs.
	 */
	if (!topa_multiple_entries) {
		psb_period = 256;
		goto out;
	}

	err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
	if (err)
		val = 0;

	psb_period = 1 << (val + 11);
out:
	pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);

	return psb_period;
}
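/*
 * The psb_period config field encodes a power of two, so a field value
 * of 0 gives a PSB period of 2KiB (1 << 11) and each increment doubles
 * it, e.g. val == 3 gives 16KiB between PSB packets.
 */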
static int intel_pt_pick_bit(int bits, int target)
{
	int pos, pick = -1;

	for (pos = 0; bits; bits >>= 1, pos++) {
		if (bits & 1) {
			if (pos <= target || pick < 0)
				pick = pos;
			if (pos >= target)
				break;
		}
	}

	return pick;
}
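/*
 * Worked example: intel_pt_pick_bit() prefers the highest supported bit
 * position not above the target, e.g. bits 0x249 (0, 3, 6, 9 set) with
 * target 3 picks 3, while bits 0x241 (0, 6, 9) picks 0.  Only if every
 * supported position is above the target does it settle for the lowest
 * one, e.g. bits 0x240 (6, 9) picks 6.
 */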
static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
{
	char buf[256];
	int mtc, mtc_periods = 0, mtc_period;
	int psb_cyc, psb_periods, psb_period;
	int pos = 0;
	u64 config;
	char c;

	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
				&mtc) != 1)
		mtc = 1;

	if (mtc) {
		if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
					&mtc_periods) != 1)
			mtc_periods = 0;
		if (mtc_periods) {
			mtc_period = intel_pt_pick_bit(mtc_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",mtc,mtc_period=%d", mtc_period);
		}
	}

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
				&psb_cyc) != 1)
		psb_cyc = 1;

	if (psb_cyc && mtc_periods) {
		if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
					&psb_periods) != 1)
			psb_periods = 0;
		if (psb_periods) {
			psb_period = intel_pt_pick_bit(psb_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",psb_period=%d", psb_period);
		}
	}

	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
	    perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1)
		pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");

	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);

	intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);

	return config;
}
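/*
 * On typical hardware the string built above ends up as something like
 * "tsc,mtc,mtc_period=3,psb_period=3,pt,branch", which is then parsed
 * back into a raw config value.
 */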
static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
					   struct record_opts *opts,
					   const char *str)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	ptr->snapshot_size = snapshot_size;

	return 0;
}
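/*
 * Illustrative: "perf record -S -e intel_pt//u" enables snapshot mode
 * with a default size, while an appended size such as "-S0x10000"
 * requests a 64KiB snapshot; the size string is parsed by strtoull()
 * above.
 */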
struct perf_event_attr *
intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
{
	struct perf_event_attr *attr;

	attr = zalloc(sizeof(struct perf_event_attr));
	if (!attr)
		return NULL;

	attr->config = intel_pt_default_config(intel_pt_pmu);

	intel_pt_pmu->selectable = true;

	return attr;
}
static const char *intel_pt_find_filter(struct evlist *evlist,
					struct perf_pmu *intel_pt_pmu)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type)
			return evsel->filter;
	}

	return NULL;
}
static size_t intel_pt_filter_bytes(const char *filter)
{
	size_t len = filter ? strlen(filter) : 0;

	return len ? roundup(len + 1, 8) : 0;
}
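/*
 * The filter string is stored in the u64-aligned priv area, so its
 * length plus nul terminator is rounded up to a multiple of 8, e.g. a
 * 5-byte filter occupies 8 bytes and an 8-byte filter occupies 16.
 */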
static size_t
intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	const char *filter = intel_pt_find_filter(evlist, ptr->intel_pt_pmu);

	ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) +
			 intel_pt_filter_bytes(filter);

	return ptr->priv_size;
}
static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	__get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
	*n = ebx;
	*d = eax;
}
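/*
 * CPUID leaf 0x15 reports the TSC to core crystal clock ratio: EBX is
 * the numerator and EAX the denominator, which is the TSC/CTC ratio the
 * decoder needs to relate MTC packets to TSC time.
 */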
static int intel_pt_info_fill(struct auxtrace_record *itr,
			      struct perf_session *session,
			      struct auxtrace_info_event *auxtrace_info,
			      size_t priv_size)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	struct perf_event_mmap_page *pc;
	struct perf_tsc_conversion tc = { .time_mult = 0, };
	bool cap_user_time_zero = false, per_cpu_mmaps;
	u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
	u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
	unsigned long max_non_turbo_ratio;
	size_t filter_str_len;
	const char *filter;
	u64 *info;
	int err;

	if (priv_size != ptr->priv_size)
		return -EINVAL;

	intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
	intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp",
			     &noretcomp_bit);
	intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit);
	mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
					      "mtc_period");
	intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit);

	intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);

	if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio",
				"%lu", &max_non_turbo_ratio) != 1)
		max_non_turbo_ratio = 0;

	filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
	filter_str_len = filter ? strlen(filter) : 0;

	if (!session->evlist->nr_mmaps)
		return -EINVAL;

	pc = session->evlist->mmap[0].base;
	if (pc) {
		err = perf_read_tsc_conversion(pc, &tc);
		if (err) {
			if (err != -EOPNOTSUPP)
				return err;
		} else {
			cap_user_time_zero = tc.time_mult != 0;
		}
		if (!cap_user_time_zero)
			ui__warning("Intel Processor Trace: TSC not available\n");
	}

	per_cpu_mmaps = !cpu_map__empty(session->evlist->core.cpus);

	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
	auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
	auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
	auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
	auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
	auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
	auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
	auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
	auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
	auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
	auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
	auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
	auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
	auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO] = max_non_turbo_ratio;
	auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] = filter_str_len;

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;

	if (filter_str_len) {
		size_t len = intel_pt_filter_bytes(filter);

		strncpy((char *)info, filter, len);
		info += len >> 3;
	}

	return 0;
}
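/*
 * Layout of the decoder's priv area filled above: INTEL_PT_AUXTRACE_PRIV_MAX
 * fixed u64 slots, followed by the optional address filter string padded
 * to a multiple of 8 bytes (see intel_pt_filter_bytes()).
 */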
static int intel_pt_track_switches(struct evlist *evlist)
{
	const char *sched_switch = "sched:sched_switch";
	struct evsel *evsel;
	int err;

	if (!perf_evlist__can_select_event(evlist, sched_switch))
		return -EPERM;

	err = parse_events(evlist, sched_switch, NULL);
	if (err) {
		pr_debug2("%s: failed to parse %s, error %d\n",
			  __func__, sched_switch, err);
		return err;
	}

	evsel = perf_evlist__last(evlist);

	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TIME);

	evsel->system_wide = true;
	evsel->no_aux_samples = true;
	evsel->immediate = true;

	return 0;
}
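/*
 * have_sched_switch values used by the decoder: 1 means the
 * sched:sched_switch tracepoint selected above, 2 means per-task
 * PERF_RECORD_SWITCH events, and 3 means CPU-wide context-switch
 * events (see intel_pt_recording_options() below).
 */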
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
	unsigned int val, last = 0, state = 1;
	size_t p = 0;

	str[0] = '\0';

	for (val = 0; val <= 64; val++, valid >>= 1) {
		if (valid & 1) {
			last = val;
			switch (state) {
			case 0:
				p += scnprintf(str + p, len - p, ",");
				/* Fall through */
			case 1:
				p += scnprintf(str + p, len - p, "%u", val);
				state = 2;
				break;
			case 2:
				state = 3;
				break;
			case 3:
				state = 4;
				break;
			default:
				break;
			}
		} else {
			switch (state) {
			case 3:
				p += scnprintf(str + p, len - p, ",%u", last);
				state = 0;
				break;
			case 4:
				p += scnprintf(str + p, len - p, "-%u", last);
				state = 0;
				break;
			default:
				break;
			}
			if (state != 1)
				state = 0;
		}
	}
}
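/*
 * For example, valid 0xf renders as "0-3", while valid 0x2d (bits 0, 2,
 * 3 and 5) renders as "0,2,3,5": runs of three or more values collapse
 * into a range, pairs are printed individually.
 */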
static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
				    const char *caps, const char *name,
				    const char *supported, u64 config)
{
	char valid_str[256];
	unsigned int shift;
	unsigned long long valid;
	u64 bits;
	int ok;

	if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
		valid = 0;

	if (supported &&
	    perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
		valid = 0;

	valid |= 1; /* Assume 'config' == 0 is always valid */

	bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);

	config &= bits;

	for (shift = 0; bits && !(bits & 1); shift++)
		bits >>= 1;

	config >>= shift;

	if (config > 63)
		goto out_err;

	/* 1ULL, not 1: 'config' can be up to 63 so the shift must be 64-bit */
	if (valid & (1ULL << config))
		return 0;
out_err:
	intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
	pr_err("Invalid %s for %s. Valid values are: %s\n",
	       name, INTEL_PT_PMU_NAME, valid_str);
	return -EINVAL;
}
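/*
 * Example: "caps/psb_periods" holds a bitmask such as 0x3f, meaning
 * psb_period values 0-5 are accepted; any other psb_period term makes
 * this function print the valid values and fail.
 */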
static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
				    struct evsel *evsel)
{
	int err;
	char c;

	if (!evsel)
		return 0;

	/*
	 * If supported, force pass-through config term (pt=1) even if user
	 * sets pt=0, which avoids senseless kernel errors.
	 */
	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
	    !(evsel->core.attr.config & 1)) {
		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
		evsel->core.attr.config |= 1;
	}

	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
				       "cyc_thresh", "caps/psb_cyc",
				       evsel->core.attr.config);
	if (err)
		return err;

	err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
				       "mtc_period", "caps/mtc",
				       evsel->core.attr.config);
	if (err)
		return err;

	return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
					"psb_period", "caps/psb_cyc",
					evsel->core.attr.config);
}
static int intel_pt_recording_options(struct auxtrace_record *itr,
				      struct evlist *evlist,
				      struct record_opts *opts)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	bool have_timing_info, need_immediate = false;
	struct evsel *evsel, *intel_pt_evsel = NULL;
	const struct perf_cpu_map *cpus = evlist->core.cpus;
	bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
	u64 tsc_bit;
	int err;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			if (intel_pt_evsel) {
				pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
				return -EINVAL;
			}
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			intel_pt_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
		pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
		return -EINVAL;
	}

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
		return -EINVAL;
	}

	if (!opts->full_auxtrace)
		return 0;

	err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
	if (err)
		return err;

	/* Set default sizes for snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);

		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages = KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}
		if (!opts->auxtrace_snapshot_size)
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
		pr_debug2("Intel PT snapshot size: %zu\n",
			  opts->auxtrace_snapshot_size);
		if (psb_period &&
		    opts->auxtrace_snapshot_size <= psb_period +
						    INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
				    opts->auxtrace_snapshot_size, psb_period);
	}

	/* Set default sizes for full trace mode */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz;

		if (opts->auxtrace_snapshot_mode)
			min_sz = KiB(4);
		else
			min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);

	if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
		have_timing_info = true;
	else
		have_timing_info = false;

	/*
	 * Per-cpu recording needs sched_switch events to distinguish different
	 * threads.
	 */
	if (have_timing_info && !cpu_map__empty(cpus)) {
		if (perf_can_record_switch_events()) {
			bool cpu_wide = !target__none(&opts->target) &&
					!target__has_task(&opts->target);

			if (!cpu_wide && perf_can_record_cpu_wide()) {
				struct evsel *switch_evsel;

				err = parse_events(evlist, "dummy:u", NULL);
				if (err)
					return err;

				switch_evsel = perf_evlist__last(evlist);

				switch_evsel->core.attr.freq = 0;
				switch_evsel->core.attr.sample_period = 1;
				switch_evsel->core.attr.context_switch = 1;

				switch_evsel->system_wide = true;
				switch_evsel->no_aux_samples = true;
				switch_evsel->immediate = true;

				perf_evsel__set_sample_bit(switch_evsel, TID);
				perf_evsel__set_sample_bit(switch_evsel, TIME);
				perf_evsel__set_sample_bit(switch_evsel, CPU);
				perf_evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);

				opts->record_switch_events = false;
				ptr->have_sched_switch = 3;
			} else {
				opts->record_switch_events = true;
				need_immediate = true;
				if (cpu_wide)
					ptr->have_sched_switch = 3;
				else
					ptr->have_sched_switch = 2;
			}
		} else {
			err = intel_pt_track_switches(evlist);
			if (err == -EPERM)
				pr_debug2("Unable to select sched:sched_switch\n");
			else if (err)
				return err;
			else
				ptr->have_sched_switch = 1;
		}
	}

	if (intel_pt_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace
		 * event must come first.
		 */
		perf_evlist__to_front(evlist, intel_pt_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct evsel *tracking_evsel;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = perf_evlist__last(evlist);

		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->core.attr.freq = 0;
		tracking_evsel->core.attr.sample_period = 1;

		tracking_evsel->no_aux_samples = true;
		if (need_immediate)
			tracking_evsel->immediate = true;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!cpu_map__empty(cpus)) {
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
			/* And the CPU for switch events */
			perf_evsel__set_sample_bit(tracking_evsel, CPU);
		}
		perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
	}

	/*
	 * Warn the user when we do not have enough information to decode i.e.
	 * per-cpu with no sched_switch (except workload-only).
	 */
	if (!ptr->have_sched_switch && !cpu_map__empty(cpus) &&
	    !target__none(&opts->target))
		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");

	return 0;
}
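/*
 * Illustrative usage: "perf record -e intel_pt//u -- workload" reaches
 * here with one intel_pt evsel; "perf record -S -e intel_pt//u" adds
 * snapshot mode.  The defaults above then size the AUX area mmap: 4MiB
 * when privileged, 128KiB otherwise.
 */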
static int intel_pt_snapshot_start(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}
static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}
static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
{
	const size_t sz = sizeof(struct intel_pt_snapshot_ref);
	int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
	struct intel_pt_snapshot_ref *refs;

	if (!new_cnt)
		new_cnt = 16;

	while (new_cnt <= idx)
		new_cnt *= 2;

	refs = calloc(new_cnt, sz);
	if (!refs)
		return -ENOMEM;

	memcpy(refs, ptr->snapshot_refs, cnt * sz);
	/* The old array's contents were copied, so it can be freed */
	free(ptr->snapshot_refs);

	ptr->snapshot_refs = refs;
	ptr->snapshot_ref_cnt = new_cnt;

	return 0;
}
static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
{
	int i;

	for (i = 0; i < ptr->snapshot_ref_cnt; i++)
		zfree(&ptr->snapshot_refs[i].ref_buf);
	zfree(&ptr->snapshot_refs);
}
static void intel_pt_recording_free(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);

	intel_pt_free_snapshot_refs(ptr);
	free(ptr);
}
static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
				       size_t snapshot_buf_size)
{
	size_t ref_buf_size = ptr->snapshot_ref_buf_size;
	void *ref_buf;

	ref_buf = zalloc(ref_buf_size);
	if (!ref_buf)
		return -ENOMEM;

	ptr->snapshot_refs[idx].ref_buf = ref_buf;
	ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;

	return 0;
}
static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
					     size_t snapshot_buf_size)
{
	const size_t max_size = 256 * 1024;
	size_t buf_size = 0, psb_period;

	if (ptr->snapshot_size <= 64 * 1024)
		return 0;

	psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
	if (psb_period)
		buf_size = psb_period * 2;

	if (!buf_size || buf_size > max_size)
		buf_size = max_size;

	if (buf_size >= snapshot_buf_size)
		return 0;

	if (buf_size >= ptr->snapshot_size / 2)
		return 0;

	return buf_size;
}
static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
				  size_t snapshot_buf_size)
{
	if (ptr->snapshot_init_done)
		return 0;

	ptr->snapshot_init_done = true;

	ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
							snapshot_buf_size);

	return 0;
}
/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous.  It is assumed that @compare_size <=
 * @buf2_size.  This function returns %false if the bytes are identical, %true
 * otherwise.
 */
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
				     void *buf2, size_t offs2, size_t buf2_size)
{
	size_t end2 = offs2 + compare_size, part_size;

	if (end2 <= buf2_size)
		return memcmp(buf1, buf2 + offs2, compare_size);

	part_size = end2 - buf2_size;
	if (memcmp(buf1, buf2 + offs2, part_size))
		return true;

	compare_size -= part_size;

	return memcmp(buf1 + part_size, buf2, compare_size);
}
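/*
 * Worked example of the wrapped case: comparing 4 bytes at offset 6 of
 * an 8-byte circular buffer checks buf1[0..2) against buf2[6..8) and
 * buf1[2..4) against buf2[0..2), i.e. the region wraps from the end of
 * buf2 back to its start.
 */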
static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
				 size_t ref_size, size_t buf_size,
				 void *data, size_t head)
{
	size_t ref_end = ref_offset + ref_size;

	/* 'head' inside the reference region means it has been overwritten */
	if (ref_end > buf_size) {
		if (head > ref_offset || head < ref_end - buf_size)
			return true;
	} else if (head > ref_offset && head < ref_end) {
		return true;
	}

	return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
					buf_size);
}
static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
			      void *data, size_t head)
{
	if (head >= ref_size) {
		memcpy(ref_buf, data + head - ref_size, ref_size);
	} else {
		memcpy(ref_buf, data, head);
		ref_size -= head;
		memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
	}
}
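/*
 * The reference is a copy of the ref_size bytes that precede 'head',
 * taking a wrap at the start of the buffer into account.  Comparing a
 * stored reference against the buffer contents on the next snapshot
 * reveals whether the hardware has wrapped the buffer in the meantime.
 */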
static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 head)
{
	struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
	bool wrapped;

	wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
				       ptr->snapshot_ref_buf_size, mm->len,
				       data, head);

	intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
			  data, head);

	return wrapped;
}
static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
	int i, a, b;

	b = buf_size >> 3;
	a = b - 512;
	if (a < 0)
		a = 0;

	for (i = a; i < b; i++) {
		if (data[i])
			return true;
	}

	return false;
}
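/*
 * Heuristic: a freshly mmapped AUX buffer is zero-filled, so if any of
 * the last 512 u64s (4KiB) at the end of the buffer are non-zero, the
 * buffer must have been filled all the way to the end at least once,
 * i.e. it has wrapped.
 */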
static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
				  struct auxtrace_mmap *mm, unsigned char *data,
				  u64 *head, u64 *old)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	bool wrapped;
	int err;

	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head);

	err = intel_pt_snapshot_init(ptr, mm->len);
	if (err)
		goto out_err;

	if (idx >= ptr->snapshot_ref_cnt) {
		err = intel_pt_alloc_snapshot_refs(ptr, idx);
		if (err)
			goto out_err;
	}

	if (ptr->snapshot_ref_buf_size) {
		if (!ptr->snapshot_refs[idx].ref_buf) {
			err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
			if (err)
				goto out_err;
		}
		wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
	} else {
		wrapped = ptr->snapshot_refs[idx].wrapped;
		if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
			ptr->snapshot_refs[idx].wrapped = true;
			wrapped = true;
		}
	}

	/*
	 * In full trace mode 'head' continually increases.  However in snapshot
	 * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
	 * are adjusted to match the full trace case which expects that 'old' is
	 * always less than 'head'.
	 */
	if (wrapped) {
		*old = *head;
		*head += mm->len;
	} else {
		if (mm->mask)
			*old &= mm->mask;
		else
			*old %= mm->len;
		if (*old > *head)
			*head += mm->len;
	}

	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

	return 0;

out_err:
	pr_err("%s: failed, error %d\n", __func__, err);
	return err;
}
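/*
 * Worked example: with a 64KiB buffer that has wrapped and head at
 * 0x3000, the snapshot covers [0x3000, 0x13000): 'old' becomes 0x3000
 * and 'head' 0x13000, so the generic code copies exactly one buffer's
 * worth of the most recent data.
 */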
static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
	return rdtsc();
}
static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return perf_evlist__enable_event_idx(ptr->evlist, evsel,
							     idx);
	}
	return -EINVAL;
}
struct auxtrace_record *intel_pt_recording_init(int *err)
{
	struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
	struct intel_pt_recording *ptr;

	if (!intel_pt_pmu)
		return NULL;

	if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
		*err = -errno;
		return NULL;
	}

	ptr = zalloc(sizeof(struct intel_pt_recording));
	if (!ptr) {
		*err = -ENOMEM;
		return NULL;
	}

	ptr->intel_pt_pmu = intel_pt_pmu;
	ptr->itr.recording_options = intel_pt_recording_options;
	ptr->itr.info_priv_size = intel_pt_info_priv_size;
	ptr->itr.info_fill = intel_pt_info_fill;
	ptr->itr.free = intel_pt_recording_free;
	ptr->itr.snapshot_start = intel_pt_snapshot_start;
	ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
	ptr->itr.find_snapshot = intel_pt_find_snapshot;
	ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
	ptr->itr.reference = intel_pt_reference;
	ptr->itr.read_finish = intel_pt_read_finish;

	return &ptr->itr;
}