/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/io.h>
#include <asm/intel_pt.h>

#include "../perf_event.h"
#include "pt.h"
static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;
/*
 * Capabilities of Intel PT hardware, such as number of address bits or
 * supported output schemes, are cached and exported to userspace as the
 * "caps" attribute group of the intel_pt PMU device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
 * relevant bits together with intel_pt traces.
 *
 * These are necessary both for trace decoding (payloads_lip: address
 * width encoded in IP-related packets) and for event configuration (bitmasks
 * with permitted values for certain bit fields).
 */
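/*
 * For example, userspace can read a capability as a hex bitmask (the value
 * depends on the CPU):
 *
 *   $ cat /sys/bus/event_source/devices/intel_pt/caps/psb_periods
 *
 * and record it next to the trace so that the decoder later knows which
 * PSB periods were possible on the tracing machine.
 */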
#define PT_CAP(_n, _l, _r, _m)						\
	[PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,	\
			    .reg = _r, .mask = _m }
static struct pt_cap_desc {
	const char	*name;
	u32		leaf;
	u8		reg;
	u32		mask;
} pt_caps[] = {
	PT_CAP(max_subleaf,		0, CPUID_EAX, 0xffffffff),
	PT_CAP(cr3_filtering,		0, CPUID_EBX, BIT(0)),
	PT_CAP(psb_cyc,			0, CPUID_EBX, BIT(1)),
	PT_CAP(ip_filtering,		0, CPUID_EBX, BIT(2)),
	PT_CAP(mtc,			0, CPUID_EBX, BIT(3)),
	PT_CAP(ptwrite,			0, CPUID_EBX, BIT(4)),
	PT_CAP(power_event_trace,	0, CPUID_EBX, BIT(5)),
	PT_CAP(topa_output,		0, CPUID_ECX, BIT(0)),
	PT_CAP(topa_multiple_entries,	0, CPUID_ECX, BIT(1)),
	PT_CAP(single_range_output,	0, CPUID_ECX, BIT(2)),
	PT_CAP(payloads_lip,		0, CPUID_ECX, BIT(31)),
	PT_CAP(num_address_ranges,	1, CPUID_EAX, 0x3),
	PT_CAP(mtc_periods,		1, CPUID_EAX, 0xffff0000),
	PT_CAP(cycle_thresholds,	1, CPUID_EBX, 0xffff),
	PT_CAP(psb_periods,		1, CPUID_EBX, 0xffff0000),
};
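/*
 * pt_cap_get() - extract one capability field from the cached CPUID data
 *
 * pt_pmu.caps[] holds the raw CPUID leaf 0x14 registers as filled in by
 * pt_pmu_hw_init(); index it by (leaf, register), mask out the bits that
 * describe this capability and shift them down to bit 0.
 */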
static u32 pt_cap_get(enum pt_capabilities cap)
{
	struct pt_cap_desc *cd = &pt_caps[cap];
	u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
	unsigned int shift = __ffs(cd->mask);

	return (c & cd->mask) >> shift;
}
static ssize_t pt_cap_show(struct device *cdev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	enum pt_capabilities cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
}

static struct attribute_group pt_cap_group = {
	.name	= "caps",
};
PMU_FORMAT_ATTR(cyc,		"config:1");
PMU_FORMAT_ATTR(mtc,		"config:9");
PMU_FORMAT_ATTR(tsc,		"config:10");
PMU_FORMAT_ATTR(noretcomp,	"config:11");
PMU_FORMAT_ATTR(mtc_period,	"config:14-17");
PMU_FORMAT_ATTR(cyc_thresh,	"config:19-22");
PMU_FORMAT_ATTR(psb_period,	"config:24-27");
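/*
 * The config:N bit positions above mirror the corresponding bit positions
 * in the IA32_RTIT_CTL MSR, which is why pt_config() can OR the validated
 * attr.config bits into the MSR value directly.
 */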
static struct attribute *pt_formats_attr[] = {
	&format_attr_cyc.attr,
	&format_attr_mtc.attr,
	&format_attr_tsc.attr,
	&format_attr_noretcomp.attr,
	&format_attr_mtc_period.attr,
	&format_attr_cyc_thresh.attr,
	&format_attr_psb_period.attr,
	NULL,
};

static struct attribute_group pt_format_group = {
	.name	= "format",
	.attrs	= pt_formats_attr,
};
static ssize_t
pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
		    char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	switch (pmu_attr->id) {
	case 0:
		return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
	case 1:
		return sprintf(page, "%u:%u\n",
			       pt_pmu.tsc_art_num,
			       pt_pmu.tsc_art_den);
	default:
		break;
	}

	return -EINVAL;
}

PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
	       pt_timing_attr_show);
PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
	       pt_timing_attr_show);
static struct attribute *pt_timing_attr[] = {
	&timing_attr_max_nonturbo_ratio.attr.attr,
	&timing_attr_tsc_art_ratio.attr.attr,
	NULL,
};

static struct attribute_group pt_timing_group = {
	.attrs	= pt_timing_attr,
};
static const struct attribute_group *pt_attr_groups[] = {
	&pt_cap_group,
	&pt_format_group,
	&pt_timing_group,
	NULL,
};
static int __init pt_pmu_hw_init(void)
{
	struct dev_ext_attribute *de_attrs;
	struct attribute **attrs;
	size_t size;
	u64 reg;
	long i;

	rdmsrl(MSR_PLATFORM_INFO, reg);
	pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;

	/*
	 * if available, read in TSC to core crystal clock ratio,
	 * otherwise, zero for numerator stands for "not enumerated"
	 * itself.
	 */
	if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
		u32 eax, ebx, ecx, edx;

		cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);

		pt_pmu.tsc_art_num = ebx;
		pt_pmu.tsc_art_den = eax;
	}

	if (boot_cpu_has(X86_FEATURE_VMX)) {
		/*
		 * Intel SDM, 36.5 "Tracing post-VMXON" says that
		 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
		 * post-VMXON.
		 */
		rdmsrl(MSR_IA32_VMX_MISC, reg);
		if (reg & BIT(14))
			pt_pmu.vmx = true;
	}

	for (i = 0; i < PT_CPUID_LEAVES; i++) {
		cpuid_count(20, i,
			    &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
	}

	size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
	attrs = kzalloc(size, GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
	de_attrs = kzalloc(size, GFP_KERNEL);
	if (!de_attrs) {
		kfree(attrs);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
		struct dev_ext_attribute *de_attr = de_attrs + i;

		de_attr->attr.attr.name = pt_caps[i].name;
		sysfs_attr_init(&de_attr->attr.attr);
		de_attr->attr.attr.mode = S_IRUGO;
		de_attr->attr.show = pt_cap_show;
		de_attr->var = (void *)i;

		attrs[i] = &de_attr->attr.attr;
	}

	pt_cap_group.attrs = attrs;

	return 0;
}
#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC	| \
			  RTIT_CTL_CYC_THRESH	| \
			  RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC	(RTIT_CTL_MTC_EN	| \
			 RTIT_CTL_MTC_RANGE)

#define RTIT_CTL_PTW	(RTIT_CTL_PTW_EN	| \
			 RTIT_CTL_FUP_ON_PTW)

#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN		| \
			RTIT_CTL_DISRETC	| \
			RTIT_CTL_CYC_PSB	| \
			RTIT_CTL_MTC		| \
			RTIT_CTL_PWR_EVT_EN	| \
			RTIT_CTL_FUP_ON_PTW	| \
			RTIT_CTL_PTW_EN)
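/*
 * pt_event_valid() - check an event's attr.config against hardware caps
 *
 * Reject configs that set bits outside PT_CONFIG_MASK, or that ask for
 * CYC/PSB, MTC, PTWRITE or power event tracing features (or period values)
 * that this CPU's PT CPUID leaves do not advertise.
 */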
static bool pt_event_valid(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 allowed, requested;

	if ((config & PT_CONFIG_MASK) != config)
		return false;

	if (config & RTIT_CTL_CYC_PSB) {
		if (!pt_cap_get(PT_CAP_psb_cyc))
			return false;

		allowed = pt_cap_get(PT_CAP_psb_periods);
		requested = (config & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;

		allowed = pt_cap_get(PT_CAP_cycle_thresholds);
		requested = (config & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;
	}

	if (config & RTIT_CTL_MTC) {
		/*
		 * In the unlikely case that CPUID lists valid mtc periods,
		 * but not the mtc capability, drop out here.
		 *
		 * Spec says that setting mtc period bits while mtc bit in
		 * CPUID is 0 will #GP, so better safe than sorry.
		 */
		if (!pt_cap_get(PT_CAP_mtc))
			return false;

		allowed = pt_cap_get(PT_CAP_mtc_periods);
		if (!allowed)
			return false;

		requested = (config & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET;

		if (!(allowed & BIT(requested)))
			return false;
	}

	if (config & RTIT_CTL_PWR_EVT_EN &&
	    !pt_cap_get(PT_CAP_power_event_trace))
		return false;

	if (config & RTIT_CTL_PTW) {
		if (!pt_cap_get(PT_CAP_ptwrite))
			return false;

		/* FUPonPTW without PTW doesn't make sense */
		if ((config & RTIT_CTL_FUP_ON_PTW) &&
		    !(config & RTIT_CTL_PTW_EN))
			return false;
	}

	return true;
}
/*
 * PT configuration helpers
 *
 * These all are cpu affine and operate on a local PT
 */
/* Address ranges and their corresponding msr configuration registers */
static const struct pt_address_range {
	unsigned long	msr_a;
	unsigned long	msr_b;
	unsigned int	reg_off;
} pt_address_ranges[] = {
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR0_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR0_B,
		.reg_off = RTIT_CTL_ADDR0_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR1_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR1_B,
		.reg_off = RTIT_CTL_ADDR1_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR2_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR2_B,
		.reg_off = RTIT_CTL_ADDR2_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR3_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR3_B,
		.reg_off = RTIT_CTL_ADDR3_OFFSET,
	}
};
static u64 pt_config_filters(struct perf_event *event)
{
	struct pt_filters *filters = event->hw.addr_filters;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	unsigned int range = 0;
	u64 rtit_ctl = 0;

	if (!filters)
		return 0;

	perf_event_addr_filters_sync(event);

	for (range = 0; range < filters->nr_filters; range++) {
		struct pt_filter *filter = &filters->filter[range];

		/*
		 * Note, if the range has zero start/end addresses due
		 * to its dynamic object not being loaded yet, we just
		 * go ahead and program a zeroed range, which will simply
		 * produce no data. Note^2: if executable code at 0x0
		 * is a concern, we can set up an "invalid" configuration
		 * such as msr_b < msr_a.
		 */

		/* avoid redundant msr writes */
		if (pt->filters.filter[range].msr_a != filter->msr_a) {
			wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
			pt->filters.filter[range].msr_a = filter->msr_a;
		}

		if (pt->filters.filter[range].msr_b != filter->msr_b) {
			wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
			pt->filters.filter[range].msr_b = filter->msr_b;
		}

		rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
	}

	return rtit_ctl;
}
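/*
 * pt_config() - program IA32_RTIT_CTL and (re)start tracing for @event
 *
 * The RTIT_CTL value is built from the address filter configuration, the
 * ToPA/branch/trace enable bits, CPL filtering derived from
 * exclude_kernel/exclude_user, and the already-validated attr.config bits.
 */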
static void pt_config(struct perf_event *event)
{
	u64 reg;

	if (!event->hw.itrace_started) {
		event->hw.itrace_started = 1;
		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
	}

	reg = pt_config_filters(event);
	reg |= RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;

	if (!event->attr.exclude_kernel)
		reg |= RTIT_CTL_OS;
	if (!event->attr.exclude_user)
		reg |= RTIT_CTL_USR;

	reg |= (event->attr.config & PT_CONFIG_MASK);

	event->hw.config = reg;
	wrmsrl(MSR_IA32_RTIT_CTL, reg);
}
static void pt_config_stop(struct perf_event *event)
{
	u64 ctl = READ_ONCE(event->hw.config);

	/* may be already stopped by a PMI */
	if (!(ctl & RTIT_CTL_TRACEEN))
		return;

	ctl &= ~RTIT_CTL_TRACEEN;
	wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	WRITE_ONCE(event->hw.config, ctl);

	/*
	 * A wrmsr that disables trace generation serializes other PT
	 * registers and causes all data packets to be written to memory,
	 * but a fence is required for the data to become globally visible.
	 *
	 * The below WMB, separating data store and aux_head store matches
	 * the consumer's RMB that separates aux_head load and data load.
	 */
	wmb();
}
static void pt_config_buffer(void *buf, unsigned int topa_idx,
			     unsigned int output_off)
{
	u64 reg;

	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));
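	/*
	 * IA32_RTIT_OUTPUT_MASK_PTRS layout in ToPA mode: the low 7 bits
	 * (0x7f) are always set, bits 31:7 hold the index of the current
	 * ToPA entry within the table, and bits 63:32 hold the offset into
	 * the current output region. pt_read_offset() undoes this encoding.
	 */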
	reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);

	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}
/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */

#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
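/*
 * With 4KiB pages and 8-byte ToPA entries this works out to
 * (4096 - 40) / 8 - 1 = 506 entries per table; the final slot of each
 * table is kept free for the END link entry that chains tables together.
 */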
/**
 * struct topa - page-sized ToPA table with metadata at the top
 * @table:	actual ToPA table entries, as understood by PT hardware
 * @list:	linkage to struct pt_buffer's list of tables
 * @phys:	physical address of this page
 * @offset:	offset of the first entry in this table in the buffer
 * @size:	total size of all entries in this table
 * @last:	index of the last initialized entry in this table
 */
struct topa {
	struct topa_entry	table[TENTS_PER_PAGE];
	struct list_head	list;
	u64			phys;
	u64			offset;
	size_t			size;
	int			last;
};
/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * Return:	On success, return the pointer to ToPA table page.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
	int node = cpu_to_node(cpu);
	struct topa *topa;
	struct page *p;

	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
	if (!p)
		return NULL;

	topa = page_address(p);
	topa->phys = page_to_phys(p);

	/*
	 * In case of single-entry ToPA, always put the self-referencing END
	 * link as the 2nd entry in the table
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
		TOPA_ENTRY(topa, 1)->end = 1;
	}

	return topa;
}
/**
 * topa_free() - free a page-sized ToPA table
 * @topa:	Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
	free_page((unsigned long)topa);
}
/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:	PT buffer that's being extended.
 * @topa:	New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry for @topa to the current
 * "last" table and adjust the last table pointer to @topa.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
	struct topa *last = buf->last;

	list_add_tail(&topa->list, &buf->tables);

	if (!buf->first) {
		buf->first = buf->last = buf->cur = topa;
		return;
	}

	topa->offset = last->offset + last->size;
	buf->last = topa;

	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return;

	BUG_ON(last->last != TENTS_PER_PAGE - 1);

	TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
	TOPA_ENTRY(last, -1)->end = 1;
}
/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:	ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
	/* single-entry ToPA is a special case */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return !!topa->last;

	return topa->last == TENTS_PER_PAGE - 1;
}
/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf:	PT buffer being initialized.
 * @gfp:	Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return:	0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
{
	struct topa *topa = buf->last;
	int order = 0;
	struct page *p;

	p = virt_to_page(buf->data_pages[buf->nr_pages]);
	if (PagePrivate(p))
		order = page_private(p);

	if (topa_table_full(topa)) {
		topa = topa_alloc(buf->cpu, gfp);
		if (!topa)
			return -ENOMEM;

		topa_insert_table(buf, topa);
	}

	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
	TOPA_ENTRY(topa, -1)->size = order;
	if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, -1)->intr = 1;
		TOPA_ENTRY(topa, -1)->stop = 1;
	}

	topa->last++;
	topa->size += sizes(order);

	buf->nr_pages += 1ul << order;

	return 0;
}
/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:	PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
	struct topa *topa;

	list_for_each_entry(topa, &buf->tables, list) {
		int i;

		pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
			 topa->phys, topa->offset, topa->size);
		for (i = 0; i < TENTS_PER_PAGE; i++) {
			pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
				 &topa->table[i],
				 (unsigned long)topa->table[i].base << TOPA_SHIFT,
				 sizes(topa->table[i].size),
				 topa->table[i].end ? 'E' : ' ',
				 topa->table[i].intr ? 'I' : ' ',
				 topa->table[i].stop ? 'S' : ' ',
				 *(u64 *)&topa->table[i]);
			if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
			     topa->table[i].stop) ||
			    topa->table[i].end)
				break;
		}
	}
}
/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:	PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
	buf->output_off = 0;
	buf->cur_idx++;

	if (buf->cur_idx == buf->cur->last) {
		if (buf->cur == buf->last)
			buf->cur = buf->first;
		else
			buf->cur = list_entry(buf->cur->list.next, struct topa,
					      list);
		buf->cur_idx = 0;
	}
}
/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:		Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 topa_idx, base, old;

	/* offset of the first region in this table from the beginning of buf */
	base = buf->cur->offset + buf->output_off;

	/* offset of the current output region within this table */
	for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
		base += sizes(buf->cur->table[topa_idx].size);

	if (buf->snapshot) {
		local_set(&buf->data_size, base);
	} else {
		old = (local64_xchg(&buf->head, base) &
		       ((buf->nr_pages << PAGE_SHIFT) - 1));
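		/*
		 * If the new write pointer is behind the old one, the output
		 * wrapped around the end of the buffer; add a full buffer's
		 * worth so that the data_size delta below stays positive.
		 */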
		if (base < old)
			base += buf->nr_pages << PAGE_SHIFT;

		local_add(base - old, &buf->data_size);
	}
}
/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:	PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
	return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
}

/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:	PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
	return sizes(buf->cur->table[buf->cur_idx].size);
}
/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:		Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	int advance = 0;
	u64 status;

	rdmsrl(MSR_IA32_RTIT_STATUS, status);

	if (status & RTIT_STATUS_ERROR) {
		pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
		status &= ~RTIT_STATUS_ERROR;
	}

	if (status & RTIT_STATUS_STOPPED) {
		status &= ~RTIT_STATUS_STOPPED;

		/*
		 * On systems that only do single-entry ToPA, hitting STOP
		 * means we are already losing data; need to let the decoder
		 * know.
		 */
		if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
		    buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
			local_inc(&buf->lost);
			advance++;
		}
	}

	/*
	 * Also on single-entry ToPA implementations, the interrupt will come
	 * before the output reaches its output region's boundary.
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
	    pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
		void *head = pt_buffer_region(buf);

		/* everything within this margin needs to be zeroed out */
		memset(head + buf->output_off, 0,
		       pt_buffer_region_size(buf) -
		       buf->output_off);
		advance++;
	}

	if (advance)
		pt_buffer_advance(buf);

	wrmsrl(MSR_IA32_RTIT_STATUS, status);
}
/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:	PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
	u64 offset, base_topa;

	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
	buf->cur = phys_to_virt(base_topa);

	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
	/* offset within current output region */
	buf->output_off = offset >> 32;
	/* index of current output region within this table */
	buf->cur_idx = (offset & 0xffffff80) >> 7;
}
/**
 * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
 * @buf:	PT buffer.
 * @pg:		Page offset in the buffer.
 *
 * When advancing to the next output region (ToPA entry), given a page offset
 * into the buffer, we need to find the offset of the first page in the next
 * region.
 */
static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
{
	struct topa_entry *te = buf->topa_index[pg];

	/* one region */
	if (buf->first == buf->last && buf->first->last == 1)
		return pg;

	do {
		pg++;
		pg &= buf->nr_pages - 1;
	} while (buf->topa_index[pg] == te);

	return pg;
}
/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:	PT buffer.
 * @handle:	Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected and waking up the consumer after a certain fraction of
 * the buffer has filled up. Only needed and sensible for non-snapshot counters.
 *
 * This obviously relies on buf::head to figure out buffer markers, so it has
 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
 * is enabled.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
				   struct perf_output_handle *handle)
{
	unsigned long head = local64_read(&buf->head);
	unsigned long idx, npages, wakeup;

	/* can't stop in the middle of an output region */
	if (buf->output_off + handle->size + 1 <
	    sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size))
		return -EINVAL;

	/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return 0;

	/* clear STOP and INT from current entry */
	buf->topa_index[buf->stop_pos]->stop = 0;
	buf->topa_index[buf->stop_pos]->intr = 0;
	buf->topa_index[buf->intr_pos]->intr = 0;

	/* how many pages till the STOP marker */
	npages = handle->size >> PAGE_SHIFT;

	/* if it's on a page boundary, fill up one more page */
	if (!offset_in_page(head + handle->size + 1))
		npages++;

	idx = (head >> PAGE_SHIFT) + npages;
	idx &= buf->nr_pages - 1;
	buf->stop_pos = idx;

	wakeup = handle->wakeup >> PAGE_SHIFT;

	/* in the worst case, wake up the consumer one page before hard stop */
	idx = (head >> PAGE_SHIFT) + npages - 1;
	if (idx > wakeup)
		idx = wakeup;

	idx &= buf->nr_pages - 1;
	buf->intr_pos = idx;

	buf->topa_index[buf->stop_pos]->stop = 1;
	buf->topa_index[buf->stop_pos]->intr = 1;
	buf->topa_index[buf->intr_pos]->intr = 1;

	return 0;
}
/**
 * pt_buffer_setup_topa_index() - build topa_index[] table of regions
 * @buf:	PT buffer.
 *
 * topa_index[] references output regions indexed by offset into the
 * buffer for purposes of quick reverse lookup.
 */
static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
{
	struct topa *cur = buf->first, *prev = buf->last;
	struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
		*te_prev = TOPA_ENTRY(prev, prev->last - 1);
	int pg = 0, idx = 0;

	while (pg < buf->nr_pages) {
		int tidx;

		/* pages within one topa entry */
		for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
			buf->topa_index[pg] = te_prev;

		te_prev = te_cur;

		if (idx == cur->last - 1) {
			/* advance to next topa table */
			idx = 0;
			cur = list_entry(cur->list.next, struct topa, list);
		} else {
			idx++;
		}
		te_cur = TOPA_ENTRY(cur, idx);
	}
}
/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:	PT buffer.
 * @head:	Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly. This is done after we have obtained the
 * current aux_head position from a successful call to perf_aux_output_begin()
 * to make sure the hardware is writing to the right place.
 *
 * This function modifies buf::{cur,cur_idx,output_off} that will be programmed
 * into PT msrs when the tracing is enabled and buf::head and buf::data_size,
 * which are used to determine INT and STOP markers' locations by a subsequent
 * call to pt_buffer_reset_markers().
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
	int pg;

	if (buf->snapshot)
		head &= (buf->nr_pages << PAGE_SHIFT) - 1;

	pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
	pg = pt_topa_next_entry(buf, pg);
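	/*
	 * topa_index[pg] points at a ToPA entry inside its table's page, so
	 * masking the pointer with PAGE_MASK recovers the struct topa and
	 * the pointer difference divided by the entry size gives the index
	 * of that entry within the table.
	 */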
	buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
	buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
			(unsigned long)buf->cur) / sizeof(struct topa_entry);
	buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);

	local64_set(&buf->head, head);
	local_set(&buf->data_size, 0);
}
/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf:	PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
	struct topa *topa, *iter;

	list_for_each_entry_safe(topa, iter, &buf->tables, list) {
		/*
		 * right now, this is in free_aux() path only, so
		 * no need to unlink this table from the list
		 */
		topa_free(topa);
	}
}
/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf:	PT buffer.
 * @nr_pages:	Number of pages to map into this buffer.
 * @gfp:	Allocation flags.
 */
static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
			       gfp_t gfp)
{
	struct topa *topa;
	int err;

	topa = topa_alloc(buf->cpu, gfp);
	if (!topa)
		return -ENOMEM;

	topa_insert_table(buf, topa);

	while (buf->nr_pages < nr_pages) {
		err = topa_insert_pages(buf, gfp);
		if (err) {
			pt_buffer_fini_topa(buf);
			return -ENOMEM;
		}
	}

	pt_buffer_setup_topa_index(buf);

	/* link last table to the first one, unless we're double buffering */
	if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
		TOPA_ENTRY(buf->last, -1)->end = 1;
	}

	return 0;
}
/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @cpu:	Cpu on which to allocate, -1 means current.
 * @pages:	Array of pointers to buffer pages passed from perf core.
 * @nr_pages:	Number of pages in the buffer.
 * @snapshot:	If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping for an AUX buffer.
 *
 * Return:	Our private PT buffer structure.
 */
static void *
pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
{
	struct pt_buffer *buf;
	int node, ret;

	if (cpu == -1)
		cpu = raw_smp_processor_id();
	node = cpu_to_node(cpu);

	buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
			   GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->cpu = cpu;
	buf->snapshot = snapshot;
	buf->data_pages = pages;

	INIT_LIST_HEAD(&buf->tables);

	ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		return NULL;
	}

	return buf;
}
/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:	PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
	struct pt_buffer *buf = data;

	pt_buffer_fini_topa(buf);
	kfree(buf);
}
static int pt_addr_filters_init(struct perf_event *event)
{
	struct pt_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	if (!pt_cap_get(PT_CAP_num_address_ranges))
		return 0;

	filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}
static void pt_addr_filters_fini(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}
static inline bool valid_kernel_ip(unsigned long ip)
{
	return virt_addr_valid(ip) && kernel_ip(ip);
}
static int pt_event_addr_filters_validate(struct list_head *filters)
{
	struct perf_addr_filter *filter;
	int range = 0;

	list_for_each_entry(filter, filters, entry) {
		/* PT doesn't support single address triggers */
		if (!filter->range || !filter->size)
			return -EOPNOTSUPP;

		if (!filter->inode) {
			if (!valid_kernel_ip(filter->offset))
				return -EINVAL;

			if (!valid_kernel_ip(filter->offset + filter->size))
				return -EINVAL;
		}

		if (++range > pt_cap_get(PT_CAP_num_address_ranges))
			return -EOPNOTSUPP;
	}

	return 0;
}
static void pt_event_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long msr_a, msr_b, *offs = event->addr_filters_offs;
	struct pt_filters *filters = event->hw.addr_filters;
	struct perf_addr_filter *filter;
	int range = 0;

	if (!filters)
		return;

	list_for_each_entry(filter, &head->list, entry) {
		if (filter->inode && !offs[range]) {
			msr_a = msr_b = 0;
		} else {
			/* apply the offset */
			msr_a = filter->offset + offs[range];
			msr_b = filter->size + msr_a - 1;
		}

		filters->filter[range].msr_a = msr_a;
		filters->filter[range].msr_b = msr_b;
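		/*
		 * ADDRn_CFG in RTIT_CTL: 1 programs this range as a TraceEn
		 * filter (trace only inside the range), 2 programs it as a
		 * TraceStop range.
		 */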
		filters->filter[range].config = filter->filter ? 1 : 2;
		range++;
	}

	filters->nr_filters = range;
}
/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;
	struct perf_event *event = pt->handle.event;

	/*
	 * There may be a dangling PT bit in the interrupt status register
	 * after PT has been disabled by pt_event_stop(). Make sure we don't
	 * do anything (particularly, re-enable) for this event here.
	 */
	if (!READ_ONCE(pt->handle_nmi))
		return;

	/*
	 * If VMX is on and PT does not support it, don't touch anything.
	 */
	if (READ_ONCE(pt->vmx_on))
		return;

	if (!event)
		return;

	pt_config_stop(event);

	buf = perf_get_aux(&pt->handle);
	if (!buf)
		return;

	pt_read_offset(buf);

	pt_handle_status(pt);

	pt_update_head(pt);

	perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
			    local_xchg(&buf->lost, 0));

	if (!event->hw.state) {
		int ret;

		buf = perf_aux_output_begin(&pt->handle, event);
		if (!buf) {
			event->hw.state = PERF_HES_STOPPED;
			return;
		}

		pt_buffer_reset_offsets(buf, pt->handle.head);
		/* snapshot counters don't use PMI, so it's safe */
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret) {
			perf_aux_output_end(&pt->handle, 0, true);
			return;
		}

		pt_config_buffer(buf->cur->table, buf->cur_idx,
				 buf->output_off);
		pt_config(event);
	}
}
void intel_pt_handle_vmx(int on)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct perf_event *event;
	unsigned long flags;

	/* PT plays nice with VMX, do nothing */
	if (pt_pmu.vmx)
		return;

	/*
	 * VMXON will clear RTIT_CTL.TraceEn; we need to make
	 * sure to not try to set it while VMX is on. Disable
	 * interrupts to avoid racing with pmu callbacks;
	 * concurrent PMI should be handled fine.
	 */
	local_irq_save(flags);
	WRITE_ONCE(pt->vmx_on, on);

	if (on) {
		/* prevent pt_config_stop() from writing RTIT_CTL */
		event = pt->handle.event;
		if (event)
			event->hw.config = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
static void pt_event_start(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;

	if (READ_ONCE(pt->vmx_on))
		return;

	buf = perf_aux_output_begin(&pt->handle, event);
	if (!buf)
		goto fail_stop;

	pt_buffer_reset_offsets(buf, pt->handle.head);
	if (!buf->snapshot) {
		if (pt_buffer_reset_markers(buf, &pt->handle))
			goto fail_end_stop;
	}

	WRITE_ONCE(pt->handle_nmi, 1);
	hwc->state = 0;

	pt_config_buffer(buf->cur->table, buf->cur_idx,
			 buf->output_off);
	pt_config(event);

	return;

fail_end_stop:
	perf_aux_output_end(&pt->handle, 0, true);
fail_stop:
	hwc->state = PERF_HES_STOPPED;
}
static void pt_event_stop(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	/*
	 * Protect against the PMI racing with disabling wrmsr,
	 * see comment in intel_pt_interrupt().
	 */
	WRITE_ONCE(pt->handle_nmi, 0);

	pt_config_stop(event);

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		struct pt_buffer *buf = perf_get_aux(&pt->handle);

		if (!buf)
			return;

		if (WARN_ON_ONCE(pt->handle.event != event))
			return;

		pt_read_offset(buf);

		pt_handle_status(pt);

		pt_update_head(pt);

		if (buf->snapshot)
			pt->handle.head =
				local_xchg(&buf->data_size,
					   buf->nr_pages << PAGE_SHIFT);
		perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
				    local_xchg(&buf->lost, 0));
	}
}
static void pt_event_del(struct perf_event *event, int mode)
{
	pt_event_stop(event, PERF_EF_UPDATE);
}
static int pt_event_add(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	if (pt->handle.event)
		goto fail;

	if (mode & PERF_EF_START) {
		pt_event_start(event, 0);
		ret = -EINVAL;
		if (hwc->state == PERF_HES_STOPPED)
			goto fail;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	ret = 0;
fail:
	return ret;
}
static void pt_event_read(struct perf_event *event)
{
}
static void pt_event_destroy(struct perf_event *event)
{
	pt_addr_filters_fini(event);
	x86_del_exclusive(x86_lbr_exclusive_pt);
}
static int pt_event_init(struct perf_event *event)
{
	if (event->attr.type != pt_pmu.pmu.type)
		return -ENOENT;

	if (!pt_event_valid(event))
		return -EINVAL;

	if (x86_add_exclusive(x86_lbr_exclusive_pt))
		return -EBUSY;

	if (pt_addr_filters_init(event)) {
		x86_del_exclusive(x86_lbr_exclusive_pt);
		return -ENOMEM;
	}

	event->destroy = pt_event_destroy;

	return 0;
}
void cpu_emergency_stop_pt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	if (pt->handle.event)
		pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}
static __init int pt_init(void)
{
	int ret, cpu, prior_warn = 0;

	BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

	if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
		return -ENODEV;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		u64 ctl;

		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
		if (!ret && (ctl & RTIT_CTL_TRACEEN))
			prior_warn++;
	}
	put_online_cpus();

	if (prior_warn) {
		x86_add_exclusive(x86_lbr_exclusive_pt);
		pr_warn("PT is enabled at boot time, doing nothing\n");

		return -EBUSY;
	}

	ret = pt_pmu_hw_init();
	if (ret)
		return ret;

	if (!pt_cap_get(PT_CAP_topa_output)) {
		pr_warn("ToPA output is not supported on this CPU\n");
		return -ENODEV;
	}
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		pt_pmu.pmu.capabilities =
			PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;

	pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;

	pt_pmu.pmu.attr_groups		 = pt_attr_groups;
	pt_pmu.pmu.task_ctx_nr		 = perf_sw_context;
	pt_pmu.pmu.event_init		 = pt_event_init;
	pt_pmu.pmu.add			 = pt_event_add;
	pt_pmu.pmu.del			 = pt_event_del;
	pt_pmu.pmu.start		 = pt_event_start;
	pt_pmu.pmu.stop			 = pt_event_stop;
	pt_pmu.pmu.read			 = pt_event_read;
	pt_pmu.pmu.setup_aux		 = pt_buffer_setup_aux;
	pt_pmu.pmu.free_aux		 = pt_buffer_free_aux;
	pt_pmu.pmu.addr_filters_sync	 = pt_event_addr_filters_sync;
	pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
	pt_pmu.pmu.nr_addr_filters	 =
		pt_cap_get(PT_CAP_num_address_ranges);

	ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

	return ret;
}
arch_initcall(pt_init);