asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge remote-tracking branch 'tip/perf/urgent' into perf/core
authorArnaldo Carvalho de Melo <acme@redhat.com>
Mon, 4 Jun 2018 13:28:20 +0000 (10:28 -0300)
committerArnaldo Carvalho de Melo <acme@redhat.com>
Mon, 4 Jun 2018 13:28:20 +0000 (10:28 -0300)
To pick up fixes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
102 files changed:
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_nhmex.c
arch/x86/events/intel/uncore_snb.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/kernel/perf_regs.c
include/linux/perf_event.h
kernel/events/core.c
tools/include/linux/compiler-gcc.h
tools/lib/api/fs/tracing_path.c
tools/lib/api/fs/tracing_path.h
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/perf/Documentation/Makefile
tools/perf/Documentation/asciidoctor-extensions.rb [new file with mode: 0644]
tools/perf/Documentation/perf-buildid-cache.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm/tests/dwarf-unwind.c
tools/perf/arch/arm64/tests/dwarf-unwind.c
tools/perf/arch/powerpc/tests/dwarf-unwind.c
tools/perf/arch/powerpc/util/skip-callchain-idx.c
tools/perf/arch/x86/tests/dwarf-unwind.c
tools/perf/arch/x86/util/Build
tools/perf/arch/x86/util/event.c [new file with mode: 0644]
tools/perf/arch/x86/util/machine.c [new file with mode: 0644]
tools/perf/builtin-annotate.c
tools/perf/builtin-buildid-cache.c
tools/perf/builtin-inject.c
tools/perf/builtin-kallsyms.c
tools/perf/builtin-kmem.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-timechart.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/check-headers.sh
tools/perf/examples/bpf/5sec.c [new file with mode: 0644]
tools/perf/examples/bpf/empty.c [new file with mode: 0644]
tools/perf/include/bpf/bpf.h [new file with mode: 0644]
tools/perf/perf.c
tools/perf/tests/builtin-test.c
tools/perf/tests/code-reading.c
tools/perf/tests/hists_common.c
tools/perf/tests/mmap-thread-lookup.c
tools/perf/tests/parse-events.c
tools/perf/tests/shell/record+probe_libc_inet_pton.sh
tools/perf/tests/vmlinux-kallsyms.c
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/map.c
tools/perf/ui/stdio/hist.c
tools/perf/util/Build
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/auxtrace.c
tools/perf/util/build-id.c
tools/perf/util/config.c
tools/perf/util/config.h
tools/perf/util/cs-etm.c
tools/perf/util/db-export.c
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/env.c
tools/perf/util/env.h
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/genelf.c
tools/perf/util/intel-bts.c
tools/perf/util/intel-pt.c
tools/perf/util/llvm-utils.c
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/parse-events.c
tools/perf/util/probe-event.c
tools/perf/util/probe-file.c
tools/perf/util/session.c
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/srcline.c
tools/perf/util/stat.h
tools/perf/util/symbol-elf.c
tools/perf/util/symbol-minimal.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/symbol_fprintf.c
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/trace-event-info.c
tools/perf/util/trace-event.c
tools/perf/util/unwind-libdw.c
tools/perf/util/unwind-libunwind-local.c
tools/perf/util/util.c
tools/perf/util/util.h
tools/perf/util/vdso.c

index 3b993942a0e4ea6d3d067fccac492a231d8eee32..8d016ce5b80dcc0ab1102c0e864bb706297d6de4 100644 (file)
@@ -1194,7 +1194,7 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
                    filter->action == PERF_ADDR_FILTER_ACTION_START)
                        return -EOPNOTSUPP;
 
-               if (!filter->inode) {
+               if (!filter->path.dentry) {
                        if (!valid_kernel_ip(filter->offset))
                                return -EINVAL;
 
@@ -1221,7 +1221,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
                return;
 
        list_for_each_entry(filter, &head->list, entry) {
-               if (filter->inode && !offs[range]) {
+               if (filter->path.dentry && !offs[range]) {
                        msr_a = msr_b = 0;
                } else {
                        /* apply the offset */
index a7956fc7ca1d873734a0bc4f1be6cd5d1949907c..15b07379e72dc40c26a48a42de7e8ba03064a29d 100644 (file)
@@ -203,7 +203,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box,
        hwc->idx = idx;
        hwc->last_tag = ++box->tags[idx];
 
-       if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
+       if (uncore_pmc_fixed(hwc->idx)) {
                hwc->event_base = uncore_fixed_ctr(box);
                hwc->config_base = uncore_fixed_ctl(box);
                return;
@@ -218,7 +218,9 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e
        u64 prev_count, new_count, delta;
        int shift;
 
-       if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+       if (uncore_pmc_freerunning(event->hw.idx))
+               shift = 64 - uncore_freerunning_bits(box, event);
+       else if (uncore_pmc_fixed(event->hw.idx))
                shift = 64 - uncore_fixed_ctr_bits(box);
        else
                shift = 64 - uncore_perf_ctr_bits(box);
@@ -449,15 +451,30 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
        return ret ? -EINVAL : 0;
 }
 
-static void uncore_pmu_event_start(struct perf_event *event, int flags)
+void uncore_pmu_event_start(struct perf_event *event, int flags)
 {
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int idx = event->hw.idx;
 
-       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+       if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
                return;
 
-       if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
+       /*
+        * Free running counter is read-only and always active.
+        * Use the current counter value as start point.
+        * There is no overflow interrupt for free running counter.
+        * Use hrtimer to periodically poll the counter to avoid overflow.
+        */
+       if (uncore_pmc_freerunning(event->hw.idx)) {
+               list_add_tail(&event->active_entry, &box->active_list);
+               local64_set(&event->hw.prev_count,
+                           uncore_read_counter(box, event));
+               if (box->n_active++ == 0)
+                       uncore_pmu_start_hrtimer(box);
+               return;
+       }
+
+       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;
 
        event->hw.state = 0;
@@ -474,11 +491,20 @@ static void uncore_pmu_event_start(struct perf_event *event, int flags)
        }
 }
 
-static void uncore_pmu_event_stop(struct perf_event *event, int flags)
+void uncore_pmu_event_stop(struct perf_event *event, int flags)
 {
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;
 
+       /* Cannot disable free running counter which is read-only */
+       if (uncore_pmc_freerunning(hwc->idx)) {
+               list_del(&event->active_entry);
+               if (--box->n_active == 0)
+                       uncore_pmu_cancel_hrtimer(box);
+               uncore_perf_event_update(box, event);
+               return;
+       }
+
        if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
                uncore_disable_event(box, event);
                box->n_active--;
@@ -502,7 +528,7 @@ static void uncore_pmu_event_stop(struct perf_event *event, int flags)
        }
 }
 
-static int uncore_pmu_event_add(struct perf_event *event, int flags)
+int uncore_pmu_event_add(struct perf_event *event, int flags)
 {
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;
@@ -512,6 +538,17 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags)
        if (!box)
                return -ENODEV;
 
+       /*
+        * The free running counter is assigned in event_init().
+        * The free running counter event and free running counter
+        * are 1:1 mapped. It doesn't need to be tracked in event_list.
+        */
+       if (uncore_pmc_freerunning(hwc->idx)) {
+               if (flags & PERF_EF_START)
+                       uncore_pmu_event_start(event, 0);
+               return 0;
+       }
+
        ret = n = uncore_collect_events(box, event, false);
        if (ret < 0)
                return ret;
@@ -563,13 +600,21 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags)
        return 0;
 }
 
-static void uncore_pmu_event_del(struct perf_event *event, int flags)
+void uncore_pmu_event_del(struct perf_event *event, int flags)
 {
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int i;
 
        uncore_pmu_event_stop(event, PERF_EF_UPDATE);
 
+       /*
+        * The event for free running counter is not tracked by event_list.
+        * It doesn't need to force event->hw.idx = -1 to reassign the counter,
+        * because the event and the free running counter are 1:1 mapped.
+        */
+       if (uncore_pmc_freerunning(event->hw.idx))
+               return;
+
        for (i = 0; i < box->n_events; i++) {
                if (event == box->event_list[i]) {
                        uncore_put_event_constraint(box, event);
@@ -603,6 +648,10 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
        struct intel_uncore_box *fake_box;
        int ret = -EINVAL, n;
 
+       /* The free running counter is always active. */
+       if (uncore_pmc_freerunning(event->hw.idx))
+               return 0;
+
        fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
        if (!fake_box)
                return -ENOMEM;
@@ -690,6 +739,17 @@ static int uncore_pmu_event_init(struct perf_event *event)
 
                /* fixed counters have event field hardcoded to zero */
                hwc->config = 0ULL;
+       } else if (is_freerunning_event(event)) {
+               if (!check_valid_freerunning_event(box, event))
+                       return -EINVAL;
+               event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
+               /*
+                * The free running counter event and free running counter
+                * are always 1:1 mapped.
+                * The free running counter is always active.
+                * Assign the free running counter here.
+                */
+               event->hw.event_base = uncore_freerunning_counter(box, event);
        } else {
                hwc->config = event->attr.config &
                              (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
index 414dc7e7c950c6cf7279b51f4161b04e6e868aad..c9e1e0bef3c36d0dbfbca99b0cf257e33bfb17cd 100644 (file)
 
 #define UNCORE_FIXED_EVENT             0xff
 #define UNCORE_PMC_IDX_MAX_GENERIC     8
+#define UNCORE_PMC_IDX_MAX_FIXED       1
+#define UNCORE_PMC_IDX_MAX_FREERUNNING 1
 #define UNCORE_PMC_IDX_FIXED           UNCORE_PMC_IDX_MAX_GENERIC
-#define UNCORE_PMC_IDX_MAX             (UNCORE_PMC_IDX_FIXED + 1)
+#define UNCORE_PMC_IDX_FREERUNNING     (UNCORE_PMC_IDX_FIXED + \
+                                       UNCORE_PMC_IDX_MAX_FIXED)
+#define UNCORE_PMC_IDX_MAX             (UNCORE_PMC_IDX_FREERUNNING + \
+                                       UNCORE_PMC_IDX_MAX_FREERUNNING)
 
 #define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) \
                ((dev << 24) | (func << 16) | (type << 8) | idx)
@@ -35,6 +40,7 @@ struct intel_uncore_ops;
 struct intel_uncore_pmu;
 struct intel_uncore_box;
 struct uncore_event_desc;
+struct freerunning_counters;
 
 struct intel_uncore_type {
        const char *name;
@@ -42,6 +48,7 @@ struct intel_uncore_type {
        int num_boxes;
        int perf_ctr_bits;
        int fixed_ctr_bits;
+       int num_freerunning_types;
        unsigned perf_ctr;
        unsigned event_ctl;
        unsigned event_mask;
@@ -59,6 +66,7 @@ struct intel_uncore_type {
        struct intel_uncore_pmu *pmus;
        struct intel_uncore_ops *ops;
        struct uncore_event_desc *event_descs;
+       struct freerunning_counters *freerunning;
        const struct attribute_group *attr_groups[4];
        struct pmu *pmu; /* for custom pmu ops */
 };
@@ -129,6 +137,14 @@ struct uncore_event_desc {
        const char *config;
 };
 
+struct freerunning_counters {
+       unsigned int counter_base;
+       unsigned int counter_offset;
+       unsigned int box_offset;
+       unsigned int num_counters;
+       unsigned int bits;
+};
+
 struct pci2phy_map {
        struct list_head list;
        int segment;
@@ -157,6 +173,16 @@ static ssize_t __uncore_##_var##_show(struct kobject *kobj,                \
 static struct kobj_attribute format_attr_##_var =                      \
        __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
 
+static inline bool uncore_pmc_fixed(int idx)
+{
+       return idx == UNCORE_PMC_IDX_FIXED;
+}
+
+static inline bool uncore_pmc_freerunning(int idx)
+{
+       return idx == UNCORE_PMC_IDX_FREERUNNING;
+}
+
 static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
 {
        return box->pmu->type->box_ctl;
@@ -214,6 +240,60 @@ static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
        return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
 }
 
+
+/*
+ * In the uncore document, there is no event-code assigned to free running
+ * counters. Some events need to be defined to indicate the free running
+ * counters. The events are encoded as event-code + umask-code.
+ *
+ * The event-code for all free running counters is 0xff, which is the same as
+ * the fixed counters.
+ *
+ * The umask-code is used to distinguish a fixed counter and a free running
+ * counter, and different types of free running counters.
+ * - For fixed counters, the umask-code is 0x0X.
+ *   X indicates the index of the fixed counter, which starts from 0.
+ * - For free running counters, the umask-code uses the rest of the space.
+ *   It would bear the format of 0xXY.
+ *   X stands for the type of free running counters, which starts from 1.
+ *   Y stands for the index of free running counters of same type, which
+ *   starts from 0.
+ *
+ * For example, there are three types of IIO free running counters on Skylake
+ * server, IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
+ * The event-code for all the free running counters is 0xff.
+ * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
+ * which umask-code starts from 0x10.
+ * So 'ioclk' is encoded as event=0xff,umask=0x10
+ * 'bw_in_port2' is the third counter of BANDWIDTH counters. BANDWIDTH is
+ * the second type, which umask-code starts from 0x20.
+ * So 'bw_in_port2' is encoded as event=0xff,umask=0x22
+ */
+static inline unsigned int uncore_freerunning_idx(u64 config)
+{
+       return ((config >> 8) & 0xf);
+}
+
+#define UNCORE_FREERUNNING_UMASK_START         0x10
+
+static inline unsigned int uncore_freerunning_type(u64 config)
+{
+       return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
+}
+
+static inline
+unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       unsigned int type = uncore_freerunning_type(event->attr.config);
+       unsigned int idx = uncore_freerunning_idx(event->attr.config);
+       struct intel_uncore_pmu *pmu = box->pmu;
+
+       return pmu->type->freerunning[type].counter_base +
+              pmu->type->freerunning[type].counter_offset * idx +
+              pmu->type->freerunning[type].box_offset * pmu->pmu_idx;
+}
+
 static inline
 unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
 {
@@ -276,11 +356,52 @@ static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
        return box->pmu->type->fixed_ctr_bits;
 }
 
+static inline
+unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
+                                    struct perf_event *event)
+{
+       unsigned int type = uncore_freerunning_type(event->attr.config);
+
+       return box->pmu->type->freerunning[type].bits;
+}
+
+static inline int uncore_num_freerunning(struct intel_uncore_box *box,
+                                        struct perf_event *event)
+{
+       unsigned int type = uncore_freerunning_type(event->attr.config);
+
+       return box->pmu->type->freerunning[type].num_counters;
+}
+
+static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
+                                              struct perf_event *event)
+{
+       return box->pmu->type->num_freerunning_types;
+}
+
+static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
+                                                struct perf_event *event)
+{
+       unsigned int type = uncore_freerunning_type(event->attr.config);
+       unsigned int idx = uncore_freerunning_idx(event->attr.config);
+
+       return (type < uncore_num_freerunning_types(box, event)) &&
+              (idx < uncore_num_freerunning(box, event));
+}
+
 static inline int uncore_num_counters(struct intel_uncore_box *box)
 {
        return box->pmu->type->num_counters;
 }
 
+static inline bool is_freerunning_event(struct perf_event *event)
+{
+       u64 cfg = event->attr.config;
+
+       return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
+              (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
+}
+
 static inline void uncore_disable_box(struct intel_uncore_box *box)
 {
        if (box->pmu->type->ops->disable_box)
@@ -346,6 +467,10 @@ struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
+void uncore_pmu_event_start(struct perf_event *event, int flags);
+void uncore_pmu_event_stop(struct perf_event *event, int flags);
+int uncore_pmu_event_add(struct perf_event *event, int flags);
+void uncore_pmu_event_del(struct perf_event *event, int flags);
 void uncore_pmu_event_read(struct perf_event *event);
 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
 struct event_constraint *
index 93e7a8397cde249625a624ffa2c3f248f3b21009..173e2674be6ef24293c5113b43d738af3d3f1981 100644 (file)
@@ -246,7 +246,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
 {
        struct hw_perf_event *hwc = &event->hw;
 
-       if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
+       if (hwc->idx == UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
        else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
                wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
index aee5e8496be4b7e409e3db3e51898f3bfc1671d9..8527c3e1038b78d868743274c35368ab318649ca 100644 (file)
@@ -285,6 +285,15 @@ static struct uncore_event_desc snb_uncore_imc_events[] = {
 #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE    0x5054
 #define SNB_UNCORE_PCI_IMC_CTR_BASE            SNB_UNCORE_PCI_IMC_DATA_READS_BASE
 
+enum perf_snb_uncore_imc_freerunning_types {
+       SNB_PCI_UNCORE_IMC_DATA         = 0,
+       SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
+};
+
+static struct freerunning_counters snb_uncore_imc_freerunning[] = {
+       [SNB_PCI_UNCORE_IMC_DATA]     = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
+};
+
 static struct attribute *snb_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
@@ -341,9 +350,8 @@ static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf
 }
 
 /*
- * custom event_init() function because we define our own fixed, free
- * running counters, so we do not want to conflict with generic uncore
- * logic. Also simplifies processing
+ * Keep the custom event_init() function compatible with old event
+ * encoding for free running counters.
  */
 static int snb_uncore_imc_event_init(struct perf_event *event)
 {
@@ -405,11 +413,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
        switch (cfg) {
        case SNB_UNCORE_PCI_IMC_DATA_READS:
                base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
-               idx = UNCORE_PMC_IDX_FIXED;
+               idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        case SNB_UNCORE_PCI_IMC_DATA_WRITES:
                base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
-               idx = UNCORE_PMC_IDX_FIXED + 1;
+               idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        default:
                return -EINVAL;
@@ -430,75 +438,6 @@ static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_ev
        return 0;
 }
 
-static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
-{
-       struct intel_uncore_box *box = uncore_event_to_box(event);
-       u64 count;
-
-       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
-               return;
-
-       event->hw.state = 0;
-       box->n_active++;
-
-       list_add_tail(&event->active_entry, &box->active_list);
-
-       count = snb_uncore_imc_read_counter(box, event);
-       local64_set(&event->hw.prev_count, count);
-
-       if (box->n_active == 1)
-               uncore_pmu_start_hrtimer(box);
-}
-
-static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
-{
-       struct intel_uncore_box *box = uncore_event_to_box(event);
-       struct hw_perf_event *hwc = &event->hw;
-
-       if (!(hwc->state & PERF_HES_STOPPED)) {
-               box->n_active--;
-
-               WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
-               hwc->state |= PERF_HES_STOPPED;
-
-               list_del(&event->active_entry);
-
-               if (box->n_active == 0)
-                       uncore_pmu_cancel_hrtimer(box);
-       }
-
-       if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
-               /*
-                * Drain the remaining delta count out of a event
-                * that we are disabling:
-                */
-               uncore_perf_event_update(box, event);
-               hwc->state |= PERF_HES_UPTODATE;
-       }
-}
-
-static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
-{
-       struct intel_uncore_box *box = uncore_event_to_box(event);
-       struct hw_perf_event *hwc = &event->hw;
-
-       if (!box)
-               return -ENODEV;
-
-       hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
-       if (!(flags & PERF_EF_START))
-               hwc->state |= PERF_HES_ARCH;
-
-       snb_uncore_imc_event_start(event, 0);
-
-       return 0;
-}
-
-static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
-{
-       snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
-}
-
 int snb_pci2phy_map_init(int devid)
 {
        struct pci_dev *dev = NULL;
@@ -530,10 +469,10 @@ int snb_pci2phy_map_init(int devid)
 static struct pmu snb_uncore_imc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = snb_uncore_imc_event_init,
-       .add            = snb_uncore_imc_event_add,
-       .del            = snb_uncore_imc_event_del,
-       .start          = snb_uncore_imc_event_start,
-       .stop           = snb_uncore_imc_event_stop,
+       .add            = uncore_pmu_event_add,
+       .del            = uncore_pmu_event_del,
+       .start          = uncore_pmu_event_start,
+       .stop           = uncore_pmu_event_stop,
        .read           = uncore_pmu_event_read,
 };
 
@@ -552,12 +491,10 @@ static struct intel_uncore_type snb_uncore_imc = {
        .name           = "imc",
        .num_counters   = 2,
        .num_boxes      = 1,
-       .fixed_ctr_bits = 32,
-       .fixed_ctr      = SNB_UNCORE_PCI_IMC_CTR_BASE,
+       .num_freerunning_types  = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
+       .freerunning    = snb_uncore_imc_freerunning,
        .event_descs    = snb_uncore_imc_events,
        .format_group   = &snb_uncore_imc_format_group,
-       .perf_ctr       = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
-       .event_mask     = SNB_UNCORE_PCI_IMC_EVENT_MASK,
        .ops            = &snb_uncore_imc_ops,
        .pmu            = &snb_uncore_imc_pmu,
 };
index 77076a102e34a46252a9758d16f6e8f4fefc4012..87dc0263a2e1e97bd210d1296a82a2330933f558 100644 (file)
@@ -3522,6 +3522,87 @@ static struct intel_uncore_type skx_uncore_iio = {
        .format_group           = &skx_uncore_iio_format_group,
 };
 
+enum perf_uncore_iio_freerunning_type_id {
+       SKX_IIO_MSR_IOCLK                       = 0,
+       SKX_IIO_MSR_BW                          = 1,
+       SKX_IIO_MSR_UTIL                        = 2,
+
+       SKX_IIO_FREERUNNING_TYPE_MAX,
+};
+
+
+static struct freerunning_counters skx_iio_freerunning[] = {
+       [SKX_IIO_MSR_IOCLK]     = { 0xa45, 0x1, 0x20, 1, 36 },
+       [SKX_IIO_MSR_BW]        = { 0xb00, 0x1, 0x10, 8, 36 },
+       [SKX_IIO_MSR_UTIL]      = { 0xb08, 0x1, 0x10, 8, 36 },
+};
+
+static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
+       /* Free-Running IO CLOCKS Counter */
+       INTEL_UNCORE_EVENT_DESC(ioclk,                  "event=0xff,umask=0x10"),
+       /* Free-Running IIO BANDWIDTH Counters */
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0,            "event=0xff,umask=0x20"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1,            "event=0xff,umask=0x21"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2,            "event=0xff,umask=0x22"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3,            "event=0xff,umask=0x23"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0,           "event=0xff,umask=0x24"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1,           "event=0xff,umask=0x25"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2,           "event=0xff,umask=0x26"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3,           "event=0xff,umask=0x27"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,      "MiB"),
+       /* Free-running IIO UTILIZATION Counters */
+       INTEL_UNCORE_EVENT_DESC(util_in_port0,          "event=0xff,umask=0x30"),
+       INTEL_UNCORE_EVENT_DESC(util_out_port0,         "event=0xff,umask=0x31"),
+       INTEL_UNCORE_EVENT_DESC(util_in_port1,          "event=0xff,umask=0x32"),
+       INTEL_UNCORE_EVENT_DESC(util_out_port1,         "event=0xff,umask=0x33"),
+       INTEL_UNCORE_EVENT_DESC(util_in_port2,          "event=0xff,umask=0x34"),
+       INTEL_UNCORE_EVENT_DESC(util_out_port2,         "event=0xff,umask=0x35"),
+       INTEL_UNCORE_EVENT_DESC(util_in_port3,          "event=0xff,umask=0x36"),
+       INTEL_UNCORE_EVENT_DESC(util_out_port3,         "event=0xff,umask=0x37"),
+       { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
+       .read_counter           = uncore_msr_read_counter,
+};
+
+static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       NULL,
+};
+
+static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
+       .name = "format",
+       .attrs = skx_uncore_iio_freerunning_formats_attr,
+};
+
+static struct intel_uncore_type skx_uncore_iio_free_running = {
+       .name                   = "iio_free_running",
+       .num_counters           = 17,
+       .num_boxes              = 6,
+       .num_freerunning_types  = SKX_IIO_FREERUNNING_TYPE_MAX,
+       .freerunning            = skx_iio_freerunning,
+       .ops                    = &skx_uncore_iio_freerunning_ops,
+       .event_descs            = skx_uncore_iio_freerunning_events,
+       .format_group           = &skx_uncore_iio_freerunning_format_group,
+};
+
 static struct attribute *skx_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
@@ -3595,6 +3676,7 @@ static struct intel_uncore_type *skx_msr_uncores[] = {
        &skx_uncore_ubox,
        &skx_uncore_chabox,
        &skx_uncore_iio,
+       &skx_uncore_iio_free_running,
        &skx_uncore_irp,
        &skx_uncore_pcu,
        NULL,
index e47b2dbbdef3d14189f9e04942fba3f074be3b16..c06c4c16c6b69c0d251505fa4c03a658c5f938a6 100644 (file)
@@ -151,17 +151,19 @@ void perf_get_regs_user(struct perf_regs *regs_user,
        regs_user_copy->sp = user_regs->sp;
        regs_user_copy->cs = user_regs->cs;
        regs_user_copy->ss = user_regs->ss;
-
        /*
-        * Most system calls don't save these registers, don't report them.
+        * Store user space frame-pointer value on sample
+        * to facilitate stack unwinding for cases when
+        * user space executable code has such support
+        * enabled at compile time:
         */
+       regs_user_copy->bp = user_regs->bp;
+
        regs_user_copy->bx = -1;
-       regs_user_copy->bp = -1;
        regs_user_copy->r12 = -1;
        regs_user_copy->r13 = -1;
        regs_user_copy->r14 = -1;
        regs_user_copy->r15 = -1;
-
        /*
         * For this to be at all useful, we need a reasonable guess for
         * the ABI.  Be careful: we're in NMI context, and we're
index e71e99eb9a4e06602c9d421193a4e04661f667cd..bea0b0cd4bf7e4742a63c678ff58151230db69cb 100644 (file)
@@ -467,7 +467,7 @@ enum perf_addr_filter_action_t {
  */
 struct perf_addr_filter {
        struct list_head        entry;
-       struct inode            *inode;
+       struct path             path;
        unsigned long           offset;
        unsigned long           size;
        enum perf_addr_filter_action_t  action;
@@ -1016,6 +1016,14 @@ static inline int is_software_event(struct perf_event *event)
        return event->event_caps & PERF_EV_CAP_SOFTWARE;
 }
 
+/*
+ * Return 1 for event in sw context, 0 for event in hw context
+ */
+static inline int in_software_context(struct perf_event *event)
+{
+       return event->ctx->pmu->task_ctx_nr == perf_sw_context;
+}
+
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
index 67612ce359adc45efe1a721ced1d7c4a78aff75d..08f5e1b42b435b66f23531a578b2f5eabeb251f1 100644 (file)
@@ -5120,6 +5120,8 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd,
        switch (_IOC_NR(cmd)) {
        case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
        case _IOC_NR(PERF_EVENT_IOC_ID):
+       case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF):
+       case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES):
                /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
                if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
                        cmd &= ~IOCSIZE_MASK;
@@ -6668,7 +6670,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
 
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
-               if (filter->inode) {
+               if (filter->path.dentry) {
                        event->addr_filters_offs[count] = 0;
                        restart++;
                }
@@ -7333,7 +7335,7 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
                                     struct file *file, unsigned long offset,
                                     unsigned long size)
 {
-       if (filter->inode != file_inode(file))
+       if (d_inode(filter->path.dentry) != file_inode(file))
                return false;
 
        if (filter->offset > offset + size)
@@ -8686,8 +8688,7 @@ static void free_filters_list(struct list_head *filters)
        struct perf_addr_filter *filter, *iter;
 
        list_for_each_entry_safe(filter, iter, filters, entry) {
-               if (filter->inode)
-                       iput(filter->inode);
+               path_put(&filter->path);
                list_del(&filter->entry);
                kfree(filter);
        }
@@ -8784,7 +8785,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
                 * Adjust base offset if the filter is associated to a binary
                 * that needs to be mapped:
                 */
-               if (filter->inode)
+               if (filter->path.dentry)
                        event->addr_filters_offs[count] =
                                perf_addr_filter_apply(filter, mm);
 
@@ -8858,7 +8859,6 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 {
        struct perf_addr_filter *filter = NULL;
        char *start, *orig, *filename = NULL;
-       struct path path;
        substring_t args[MAX_OPT_ARGS];
        int state = IF_STATE_ACTION, token;
        unsigned int kernel = 0;
@@ -8971,19 +8971,18 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                                        goto fail_free_name;
 
                                /* look up the path and grab its inode */
-                               ret = kern_path(filename, LOOKUP_FOLLOW, &path);
+                               ret = kern_path(filename, LOOKUP_FOLLOW,
+                                               &filter->path);
                                if (ret)
                                        goto fail_free_name;
 
-                               filter->inode = igrab(d_inode(path.dentry));
-                               path_put(&path);
                                kfree(filename);
                                filename = NULL;
 
                                ret = -EINVAL;
-                               if (!filter->inode ||
-                                   !S_ISREG(filter->inode->i_mode))
-                                       /* free_filters_list() will iput() */
+                               if (!filter->path.dentry ||
+                                   !S_ISREG(d_inode(filter->path.dentry)
+                                            ->i_mode))
                                        goto fail;
 
                                event->addr_filters.nr_file_filters++;
@@ -10521,19 +10520,20 @@ SYSCALL_DEFINE5(perf_event_open,
        if (pmu->task_ctx_nr == perf_sw_context)
                event->event_caps |= PERF_EV_CAP_SOFTWARE;
 
-       if (group_leader &&
-           (is_software_event(event) != is_software_event(group_leader))) {
-               if (is_software_event(event)) {
+       if (group_leader) {
+               if (is_software_event(event) &&
+                   !in_software_context(group_leader)) {
                        /*
-                        * If event and group_leader are not both a software
-                        * event, and event is, then group leader is not.
+                        * If the event is a sw event, but the group_leader
+                        * is on hw context.
                         *
-                        * Allow the addition of software events to !software
-                        * groups, this is safe because software events never
-                        * fail to schedule.
+                        * Allow the addition of software events to hw
+                        * groups, this is safe because software events
+                        * never fail to schedule.
                         */
-                       pmu = group_leader->pmu;
-               } else if (is_software_event(group_leader) &&
+                       pmu = group_leader->ctx->pmu;
+               } else if (!is_software_event(event) &&
+                          is_software_event(group_leader) &&
                           (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
                        /*
                         * In case the group is a pure software group, and we
index a3a4427441bfe97fac07605fdd2c9fe0bcc8c213..70fe612957338c84ffeb61f03c358c6ca1e32789 100644 (file)
@@ -21,6 +21,9 @@
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 
+#ifndef __pure
+#define  __pure                __attribute__((pure))
+#endif
 #define  noinline      __attribute__((noinline))
 #ifndef __packed
 #define __packed       __attribute__((packed))
index 7b7fd0b1855105bac16583a19a0474d26d3c3234..120037496f77484bc002da4b24ae4f14340e1251 100644 (file)
 
 #include "tracing_path.h"
 
-
-char tracing_mnt[PATH_MAX]         = "/sys/kernel/debug";
-char tracing_path[PATH_MAX]        = "/sys/kernel/debug/tracing";
-char tracing_events_path[PATH_MAX] = "/sys/kernel/debug/tracing/events";
-
+static char tracing_mnt[PATH_MAX]  = "/sys/kernel/debug";
+static char tracing_path[PATH_MAX]        = "/sys/kernel/debug/tracing";
+static char tracing_events_path[PATH_MAX] = "/sys/kernel/debug/tracing/events";
 
 static void __tracing_path_set(const char *tracing, const char *mountpoint)
 {
@@ -76,7 +74,7 @@ char *get_tracing_file(const char *name)
 {
        char *file;
 
-       if (asprintf(&file, "%s/%s", tracing_path, name) < 0)
+       if (asprintf(&file, "%s/%s", tracing_path_mount(), name) < 0)
                return NULL;
 
        return file;
@@ -87,6 +85,34 @@ void put_tracing_file(char *file)
        free(file);
 }
 
+char *get_events_file(const char *name)
+{
+       char *file;
+
+       if (asprintf(&file, "%s/events/%s", tracing_path_mount(), name) < 0)
+               return NULL;
+
+       return file;
+}
+
+void put_events_file(char *file)
+{
+       free(file);
+}
+
+DIR *tracing_events__opendir(void)
+{
+       DIR *dir = NULL;
+       char *path = get_tracing_file("events");
+
+       if (path) {
+               dir = opendir(path);
+               put_events_file(path);
+       }
+
+       return dir;
+}
+
 int tracing_path__strerror_open_tp(int err, char *buf, size_t size,
                                   const char *sys, const char *name)
 {
@@ -129,7 +155,7 @@ int tracing_path__strerror_open_tp(int err, char *buf, size_t size,
                snprintf(buf, size,
                         "Error:\tNo permissions to read %s/%s\n"
                         "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
-                        tracing_events_path, filename, tracing_mnt);
+                        tracing_events_path, filename, tracing_path_mount());
        }
                break;
        default:
index 0066f06cc3817c1a45e85385449902e0d68571f8..a19136b086dce3ed8dcd23a7f4c5d6c5824b03ff 100644 (file)
@@ -3,9 +3,9 @@
 #define __API_FS_TRACING_PATH_H
 
 #include <linux/types.h>
+#include <dirent.h>
 
-extern char tracing_path[];
-extern char tracing_events_path[];
+DIR *tracing_events__opendir(void);
 
 void tracing_path_set(const char *mountpoint);
 const char *tracing_path_mount(void);
@@ -13,5 +13,10 @@ const char *tracing_path_mount(void);
 char *get_tracing_file(const char *name);
 void put_tracing_file(char *file);
 
+char *get_events_file(const char *name);
+void put_events_file(char *file);
+
+#define zput_events_file(ptr) ({ free(*ptr); *ptr = NULL; })
+
 int tracing_path__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name);
 #endif /* __API_FS_TRACING_PATH_H */
index 689b6a130dd7589a428c3f9632b139ac77b0bf27..96d830545bbb43168a2719c92793a76af754bb68 100644 (file)
@@ -10,6 +10,12 @@ u8 kallsyms2elf_type(char type)
        return (type == 't' || type == 'w') ? STT_FUNC : STT_OBJECT;
 }
 
+bool kallsyms__is_function(char symbol_type)
+{
+       symbol_type = toupper(symbol_type);
+       return symbol_type == 'T' || symbol_type == 'W';
+}
+
 int kallsyms__parse(const char *filename, void *arg,
                    int (*process_symbol)(void *arg, const char *name,
                                          char type, u64 start))
index bc40101d72c189a882842f84ee492924a50e29ce..72ab9870454baf15052fabb233ebcc04b27f94e3 100644 (file)
@@ -20,6 +20,8 @@ static inline u8 kallsyms2elf_binding(char type)
 
 u8 kallsyms2elf_type(char type);
 
+bool kallsyms__is_function(char symbol_type);
+
 int kallsyms__parse(const char *filename, void *arg,
                    int (*process_symbol)(void *arg, const char *name,
                                          char type, u64 start));
index db11478e30b4a9654874123e4586d17a278e9f44..42261a9b280e3e11721274b76503a9db1ebeac3a 100644 (file)
@@ -47,7 +47,8 @@ man5dir=$(mandir)/man5
 man7dir=$(mandir)/man7
 
 ASCIIDOC=asciidoc
-ASCIIDOC_EXTRA = --unsafe
+ASCIIDOC_EXTRA = --unsafe -f asciidoc.conf
+ASCIIDOC_HTML = xhtml11
 MANPAGE_XSL = manpage-normal.xsl
 XMLTO_EXTRA =
 INSTALL?=install
@@ -55,6 +56,14 @@ RM ?= rm -f
 DOC_REF = origin/man
 HTML_REF = origin/html
 
+ifdef USE_ASCIIDOCTOR
+ASCIIDOC = asciidoctor
+ASCIIDOC_EXTRA = -a compat-mode
+ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions
+ASCIIDOC_EXTRA += -a mansource="perf" -a manmanual="perf Manual"
+ASCIIDOC_HTML = xhtml5
+endif
+
 infodir?=$(prefix)/share/info
 MAKEINFO=makeinfo
 INSTALL_INFO=install-info
@@ -73,10 +82,12 @@ ifeq ($(_tmp_tool_path),)
        missing_tools = $(ASCIIDOC)
 endif
 
+ifndef USE_ASCIIDOCTOR
 _tmp_tool_path := $(call get-executable,$(XMLTO))
 ifeq ($(_tmp_tool_path),)
        missing_tools += $(XMLTO)
 endif
+endif
 
 #
 # For asciidoc ...
@@ -264,9 +275,17 @@ clean:
 
 $(MAN_HTML): $(OUTPUT)%.html : %.txt
        $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
-       $(ASCIIDOC) -b xhtml11 -d manpage -f asciidoc.conf \
+       $(ASCIIDOC) -b $(ASCIIDOC_HTML) -d manpage \
+               $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
+       mv $@+ $@
+
+ifdef USE_ASCIIDOCTOR
+$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt
+       $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
+       $(ASCIIDOC) -b manpage -d manpage \
                $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
        mv $@+ $@
+endif
 
 $(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml
        $(QUIET_XMLTO)$(RM) $@ && \
@@ -274,7 +293,7 @@ $(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml
 
 $(OUTPUT)%.xml : %.txt
        $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
-       $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \
+       $(ASCIIDOC) -b docbook -d manpage \
                $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
        mv $@+ $@
 
@@ -321,13 +340,13 @@ howto-index.txt: howto-index.sh $(wildcard howto/*.txt)
        mv $@+ $@
 
 $(patsubst %,%.html,$(ARTICLES)) : %.html : %.txt
-       $(QUIET_ASCIIDOC)$(ASCIIDOC) -b xhtml11 $*.txt
+       $(QUIET_ASCIIDOC)$(ASCIIDOC) -b $(ASCIIDOC_HTML) $*.txt
 
 WEBDOC_DEST = /pub/software/tools/perf/docs
 
 $(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt
        $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
-       sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b xhtml11 - >$@+ && \
+       sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b $(ASCIIDOC_HTML) - >$@+ && \
        mv $@+ $@
 
 # UNIMPLEMENTED
diff --git a/tools/perf/Documentation/asciidoctor-extensions.rb b/tools/perf/Documentation/asciidoctor-extensions.rb
new file mode 100644 (file)
index 0000000..d148fe9
--- /dev/null
@@ -0,0 +1,29 @@
+require 'asciidoctor'
+require 'asciidoctor/extensions'
+
+module Perf
+  module Documentation
+    class LinkPerfProcessor < Asciidoctor::Extensions::InlineMacroProcessor
+      use_dsl
+
+      named :chrome
+
+      def process(parent, target, attrs)
+        if parent.document.basebackend? 'html'
+          %(<a href="#{target}.html">#{target}(#{attrs[1]})</a>\n)
+        elsif parent.document.basebackend? 'manpage'
+          "#{target}(#{attrs[1]})"
+        elsif parent.document.basebackend? 'docbook'
+          "<citerefentry>\n" \
+            "<refentrytitle>#{target}</refentrytitle>" \
+            "<manvolnum>#{attrs[1]}</manvolnum>\n" \
+          "</citerefentry>\n"
+        end
+      end
+    end
+  end
+end
+
+Asciidoctor::Extensions.register do
+  inline_macro Perf::Documentation::LinkPerfProcessor, :linkperf
+end
index 73c2650bd0db565b6bd1fe695bb2504a294f0470..f6de0952ff3c9701eeedb58d6e91f23c6b3fe3c2 100644 (file)
@@ -48,6 +48,9 @@ OPTIONS
 --purge=::
         Purge all cached binaries including older caches which have specified
        path from the cache.
+-P::
+--purge-all::
+       Purge all cached binaries. This will flush out entire cache.
 -M::
 --missing=::
        List missing build ids in the cache for the specified file.
@@ -59,7 +62,9 @@ OPTIONS
        exactly same build-id, that is replaced by new one. It can be used
        to update kallsyms and kernel dso to vmlinux in order to support
        annotation.
-
+-l::
+--list::
+       List all valid binaries from cache.
 -v::
 --verbose::
        Be more verbose.
index e6c3b4e555c257f033fd7bf2ef2635173664dac5..3a822f308e6d1a04f2a04433665985bb9c6a2e1d 100644 (file)
@@ -116,6 +116,22 @@ Do not aggregate counts across all monitored CPUs.
 print counts using a CSV-style output to make it easy to import directly into
 spreadsheets. Columns are separated by the string specified in SEP.
 
+--table:: Display time for each run (-r option), in a table format, e.g.:
+
+  $ perf stat --null -r 5 --table perf bench sched pipe
+
+   Performance counter stats for 'perf bench sched pipe' (5 runs):
+
+             # Table of individual measurements:
+             5.189 (-0.293) #
+             5.189 (-0.294) #
+             5.186 (-0.296) #
+             5.663 (+0.181) ##
+             6.186 (+0.703) ####
+
+             # Final result:
+             5.483 +- 0.198 seconds time elapsed  ( +-  3.62% )
+
 -G name::
 --cgroup name::
 monitor only in the container (cgroup) called "name". This option is available only
index ae7dc46e8f8a3f4e74b53dcf1040a4e0b69a96b9..b5ac356ba323c8a363b96e10082205078f12a3f8 100644 (file)
@@ -885,6 +885,8 @@ endif
 
 # Among the variables below, these:
 #   perfexecdir
+#   perf_include_dir
+#   perf_examples_dir
 #   template_dir
 #   mandir
 #   infodir
@@ -904,6 +906,8 @@ bindir = $(abspath $(prefix)/$(bindir_relative))
 mandir = share/man
 infodir = share/info
 perfexecdir = libexec/perf-core
+perf_include_dir = lib/include/perf
+perf_examples_dir = lib/examples/perf
 sharedir = $(prefix)/share
 template_dir = share/perf-core/templates
 STRACE_GROUPS_DIR = share/perf-core/strace/groups
@@ -934,6 +938,8 @@ bindir_SQ = $(subst ','\'',$(bindir))
 mandir_SQ = $(subst ','\'',$(mandir))
 infodir_SQ = $(subst ','\'',$(infodir))
 perfexecdir_SQ = $(subst ','\'',$(perfexecdir))
+perf_include_dir_SQ = $(subst ','\'',$(perf_include_dir))
+perf_examples_dir_SQ = $(subst ','\'',$(perf_examples_dir))
 template_dir_SQ = $(subst ','\'',$(template_dir))
 htmldir_SQ = $(subst ','\'',$(htmldir))
 tipdir_SQ = $(subst ','\'',$(tipdir))
@@ -944,14 +950,20 @@ srcdir_SQ = $(subst ','\'',$(srcdir))
 
 ifneq ($(filter /%,$(firstword $(perfexecdir))),)
 perfexec_instdir = $(perfexecdir)
+perf_include_instdir = $(perf_include_dir)
+perf_examples_instdir = $(perf_examples_dir)
 STRACE_GROUPS_INSTDIR = $(STRACE_GROUPS_DIR)
 tip_instdir = $(tipdir)
 else
 perfexec_instdir = $(prefix)/$(perfexecdir)
+perf_include_instdir = $(prefix)/$(perf_include_dir)
+perf_examples_instdir = $(prefix)/$(perf_examples_dir)
 STRACE_GROUPS_INSTDIR = $(prefix)/$(STRACE_GROUPS_DIR)
 tip_instdir = $(prefix)/$(tipdir)
 endif
 perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
+perf_include_instdir_SQ = $(subst ','\'',$(perf_include_instdir))
+perf_examples_instdir_SQ = $(subst ','\'',$(perf_examples_instdir))
 STRACE_GROUPS_INSTDIR_SQ = $(subst ','\'',$(STRACE_GROUPS_INSTDIR))
 tip_instdir_SQ = $(subst ','\'',$(tip_instdir))
 
@@ -999,6 +1011,8 @@ $(call detected_var,ETC_PERFCONFIG_SQ)
 $(call detected_var,STRACE_GROUPS_DIR_SQ)
 $(call detected_var,prefix_SQ)
 $(call detected_var,perfexecdir_SQ)
+$(call detected_var,perf_include_dir_SQ)
+$(call detected_var,perf_examples_dir_SQ)
 $(call detected_var,tipdir_SQ)
 $(call detected_var,srcdir_SQ)
 $(call detected_var,LIBDIR)
index 83e453de36f8b5d38eb8fbb332328f7d4d811bba..ecc9fc9526550d114d353a71a2325533172f6f42 100644 (file)
@@ -767,6 +767,16 @@ ifndef NO_JVMTI
 endif
        $(call QUIET_INSTALL, libexec) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ifndef NO_LIBBPF
+       $(call QUIET_INSTALL, lib) \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
+       $(call QUIET_INSTALL, include/bpf) \
+               $(INSTALL) include/bpf/*.h '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
+       $(call QUIET_INSTALL, lib) \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
+       $(call QUIET_INSTALL, examples/bpf) \
+               $(INSTALL) examples/bpf/*.c '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
+endif
        $(call QUIET_INSTALL, perf-archive) \
                $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
        $(call QUIET_INSTALL, perf-with-kcore) \
index 8cb3477602339e9b94f30bb1edb2175e3828e570..9a0242e74cfc3e5b53736977e588ffc8027b347c 100644 (file)
@@ -25,7 +25,7 @@ static int sample_ustack(struct perf_sample *sample,
 
        sp = (unsigned long) regs[PERF_REG_ARM_SP];
 
-       map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp);
+       map = map_groups__find(thread->mg, (u64)sp);
        if (!map) {
                pr_debug("failed to get stack map\n");
                free(buf);
index e907f0f4c20c0c34c1806dd636990ed452f5d6b0..5522ce384723ae84abd5e8d42bce33239247a9e8 100644 (file)
@@ -25,7 +25,7 @@ static int sample_ustack(struct perf_sample *sample,
 
        sp = (unsigned long) regs[PERF_REG_ARM64_SP];
 
-       map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp);
+       map = map_groups__find(thread->mg, (u64)sp);
        if (!map) {
                pr_debug("failed to get stack map\n");
                free(buf);
index 30cbbd6d5be0c9a1aa337a350387f420eefb3068..5f39efef0856d0294ba817f975dcc56283e0afcc 100644 (file)
@@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample,
 
        sp = (unsigned long) regs[PERF_REG_POWERPC_R1];
 
-       map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp);
+       map = map_groups__find(thread->mg, (u64)sp);
        if (!map) {
                pr_debug("failed to get stack map\n");
                free(buf);
index 0c370f81e00280c6428ddfe0975584edcb9d02a7..3598b8b75d274c8ebcc6fc0452091dec34c797b7 100644 (file)
@@ -248,8 +248,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
 
        ip = chain->ips[2];
 
-       thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
-                       MAP__FUNCTION, ip, &al);
+       thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
 
        if (al.map)
                dso = al.map->dso;
index 95036c7a59e8f5ab53e723d801c23873e38b7a9a..7879df34569a0d8b3098226f0c7eeb095a059360 100644 (file)
@@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample,
 
        sp = (unsigned long) regs[PERF_REG_X86_SP];
 
-       map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp);
+       map = map_groups__find(thread->mg, (u64)sp);
        if (!map) {
                pr_debug("failed to get stack map\n");
                free(buf);
index f95e6f46ef0dc64fdf24f41912ad325e70d6be1e..844b8f335532e8aecfdde40972f86f0cf830c2c5 100644 (file)
@@ -4,6 +4,8 @@ libperf-y += pmu.o
 libperf-y += kvm-stat.o
 libperf-y += perf_regs.o
 libperf-y += group.o
+libperf-y += machine.o
+libperf-y += event.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
new file mode 100644 (file)
index 0000000..675a021
--- /dev/null
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include "../../util/machine.h"
+#include "../../util/tool.h"
+#include "../../util/map.h"
+#include "../../util/util.h"
+#include "../../util/debug.h"
+
+#if defined(__x86_64__)
+
+int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
+                                      perf_event__handler_t process,
+                                      struct machine *machine)
+{
+       int rc = 0;
+       struct map *pos;
+       struct map_groups *kmaps = &machine->kmaps;
+       struct maps *maps = &kmaps->maps;
+       union perf_event *event = zalloc(sizeof(event->mmap) +
+                                        machine->id_hdr_size);
+
+       if (!event) {
+               pr_debug("Not enough memory synthesizing mmap event "
+                        "for extra kernel maps\n");
+               return -1;
+       }
+
+       for (pos = maps__first(maps); pos; pos = map__next(pos)) {
+               struct kmap *kmap;
+               size_t size;
+
+               if (!__map__is_extra_kernel_map(pos))
+                       continue;
+
+               kmap = map__kmap(pos);
+
+               size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
+                      PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
+                      machine->id_hdr_size;
+
+               memset(event, 0, size);
+
+               event->mmap.header.type = PERF_RECORD_MMAP;
+
+               /*
+                * kernel uses 0 for user space maps, see kernel/perf_event.c
+                * __perf_event_mmap
+                */
+               if (machine__is_host(machine))
+                       event->header.misc = PERF_RECORD_MISC_KERNEL;
+               else
+                       event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+
+               event->mmap.header.size = size;
+
+               event->mmap.start = pos->start;
+               event->mmap.len   = pos->end - pos->start;
+               event->mmap.pgoff = pos->pgoff;
+               event->mmap.pid   = machine->pid;
+
+               strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
+
+               if (perf_tool__process_synth_event(tool, event, machine,
+                                                  process) != 0) {
+                       rc = -1;
+                       break;
+               }
+       }
+
+       free(event);
+       return rc;
+}
+
+#endif
diff --git a/tools/perf/arch/x86/util/machine.c b/tools/perf/arch/x86/util/machine.c
new file mode 100644 (file)
index 0000000..4520ac5
--- /dev/null
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/string.h>
+#include <stdlib.h>
+
+#include "../../util/machine.h"
+#include "../../util/map.h"
+#include "../../util/symbol.h"
+#include "../../util/sane_ctype.h"
+
+#include <symbol/kallsyms.h>
+
+#if defined(__x86_64__)
+
+struct extra_kernel_map_info {
+       int cnt;
+       int max_cnt;
+       struct extra_kernel_map *maps;
+       bool get_entry_trampolines;
+       u64 entry_trampoline;
+};
+
+static int add_extra_kernel_map(struct extra_kernel_map_info *mi, u64 start,
+                               u64 end, u64 pgoff, const char *name)
+{
+       if (mi->cnt >= mi->max_cnt) {
+               void *buf;
+               size_t sz;
+
+               mi->max_cnt = mi->max_cnt ? mi->max_cnt * 2 : 32;
+               sz = sizeof(struct extra_kernel_map) * mi->max_cnt;
+               buf = realloc(mi->maps, sz);
+               if (!buf)
+                       return -1;
+               mi->maps = buf;
+       }
+
+       mi->maps[mi->cnt].start = start;
+       mi->maps[mi->cnt].end   = end;
+       mi->maps[mi->cnt].pgoff = pgoff;
+       strlcpy(mi->maps[mi->cnt].name, name, KMAP_NAME_LEN);
+
+       mi->cnt += 1;
+
+       return 0;
+}
+
+static int find_extra_kernel_maps(void *arg, const char *name, char type,
+                                 u64 start)
+{
+       struct extra_kernel_map_info *mi = arg;
+
+       if (!mi->entry_trampoline && kallsyms2elf_binding(type) == STB_GLOBAL &&
+           !strcmp(name, "_entry_trampoline")) {
+               mi->entry_trampoline = start;
+               return 0;
+       }
+
+       if (is_entry_trampoline(name)) {
+               u64 end = start + page_size;
+
+               return add_extra_kernel_map(mi, start, end, 0, name);
+       }
+
+       return 0;
+}
+
+int machine__create_extra_kernel_maps(struct machine *machine,
+                                     struct dso *kernel)
+{
+       struct extra_kernel_map_info mi = { .cnt = 0, };
+       char filename[PATH_MAX];
+       int ret;
+       int i;
+
+       machine__get_kallsyms_filename(machine, filename, PATH_MAX);
+
+       if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+               return 0;
+
+       ret = kallsyms__parse(filename, &mi, find_extra_kernel_maps);
+       if (ret)
+               goto out_free;
+
+       if (!mi.entry_trampoline)
+               goto out_free;
+
+       for (i = 0; i < mi.cnt; i++) {
+               struct extra_kernel_map *xm = &mi.maps[i];
+
+               xm->pgoff = mi.entry_trampoline;
+               ret = machine__create_extra_kernel_map(machine, kernel, xm);
+               if (ret)
+                       goto out_free;
+       }
+
+       machine->trampolines_mapped = mi.cnt;
+out_free:
+       free(mi.maps);
+       return ret;
+}
+
+#endif
index 51709a961496ded0e64071b3173eb273c4890b58..da57042402397075f873529527489e4b390862f2 100644 (file)
@@ -45,6 +45,7 @@ struct perf_annotate {
        bool       print_line;
        bool       skip_missing;
        bool       has_br_stack;
+       bool       group_set;
        const char *sym_hist_filter;
        const char *cpu_list;
        DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -228,7 +229,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
                 */
                if (al->sym != NULL) {
                        rb_erase(&al->sym->rb_node,
-                                &al->map->dso->symbols[al->map->type]);
+                                &al->map->dso->symbols);
                        symbol__delete(al->sym);
                        dso__reset_find_symbol_cache(al->map->dso);
                }
@@ -508,6 +509,9 @@ int cmd_annotate(int argc, const char **argv)
                    "Don't shorten the displayed pathnames"),
        OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
                    "Skip symbols that cannot be annotated"),
+       OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group,
+                       &annotate.group_set,
+                       "Show event group information together"),
        OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
        OPT_CALLBACK(0, "symfs", NULL, "directory",
                     "Look for files with symbols relative to this directory",
@@ -570,6 +574,9 @@ int cmd_annotate(int argc, const char **argv)
        annotate.has_br_stack = perf_header__has_feat(&annotate.session->header,
                                                      HEADER_BRANCH_STACK);
 
+       if (annotate.group_set)
+               perf_evlist__force_leader(annotate.session->evlist);
+
        ret = symbol__annotation_init();
        if (ret < 0)
                goto out_delete;
index 41db2cba77eb96d4620982e1754e0b182b175e13..115110a4796a1aa60ab8730066aad7df7522fd3a 100644 (file)
@@ -25,6 +25,7 @@
 #include "util/session.h"
 #include "util/symbol.h"
 #include "util/time-utils.h"
+#include "util/probe-file.h"
 
 static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid)
 {
@@ -239,6 +240,34 @@ static int build_id_cache__purge_path(const char *pathname, struct nsinfo *nsi)
        return err;
 }
 
+static int build_id_cache__purge_all(void)
+{
+       struct strlist *list;
+       struct str_node *pos;
+       int err = 0;
+       char *buf;
+
+       list = build_id_cache__list_all(false);
+       if (!list) {
+               pr_debug("Failed to get buildids: -%d\n", errno);
+               return -EINVAL;
+       }
+
+       strlist__for_each_entry(pos, list) {
+               buf = build_id_cache__origname(pos->s);
+               err = build_id_cache__remove_s(pos->s);
+               pr_debug("Removing %s (%s): %s\n", buf, pos->s,
+                        err ? "FAIL" : "Ok");
+               free(buf);
+               if (err)
+                       break;
+       }
+       strlist__delete(list);
+
+       pr_debug("Purged all: %s\n", err ? "FAIL" : "Ok");
+       return err;
+}
+
 static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
 {
        char filename[PATH_MAX];
@@ -297,6 +326,26 @@ static int build_id_cache__update_file(const char *filename, struct nsinfo *nsi)
        return err;
 }
 
+static int build_id_cache__show_all(void)
+{
+       struct strlist *bidlist;
+       struct str_node *nd;
+       char *buf;
+
+       bidlist = build_id_cache__list_all(true);
+       if (!bidlist) {
+               pr_debug("Failed to get buildids: -%d\n", errno);
+               return -1;
+       }
+       strlist__for_each_entry(nd, bidlist) {
+               buf = build_id_cache__origname(nd->s);
+               fprintf(stdout, "%s %s\n", nd->s, buf);
+               free(buf);
+       }
+       strlist__delete(bidlist);
+       return 0;
+}
+
 int cmd_buildid_cache(int argc, const char **argv)
 {
        struct strlist *list;
@@ -304,6 +353,9 @@ int cmd_buildid_cache(int argc, const char **argv)
        int ret = 0;
        int ns_id = -1;
        bool force = false;
+       bool list_files = false;
+       bool opts_flag = false;
+       bool purge_all = false;
        char const *add_name_list_str = NULL,
                   *remove_name_list_str = NULL,
                   *purge_name_list_str = NULL,
@@ -327,6 +379,8 @@ int cmd_buildid_cache(int argc, const char **argv)
                    "file(s) to remove"),
        OPT_STRING('p', "purge", &purge_name_list_str, "file list",
                    "file(s) to remove (remove old caches too)"),
+       OPT_BOOLEAN('P', "purge-all", &purge_all, "purge all cached files"),
+       OPT_BOOLEAN('l', "list", &list_files, "list all cached files"),
        OPT_STRING('M', "missing", &missing_filename, "file",
                   "to find missing build ids in the cache"),
        OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
@@ -344,11 +398,20 @@ int cmd_buildid_cache(int argc, const char **argv)
        argc = parse_options(argc, argv, buildid_cache_options,
                             buildid_cache_usage, 0);
 
-       if (argc || (!add_name_list_str && !kcore_filename &&
-                    !remove_name_list_str && !purge_name_list_str &&
-                    !missing_filename && !update_name_list_str))
+       opts_flag = add_name_list_str || kcore_filename ||
+               remove_name_list_str || purge_name_list_str ||
+               missing_filename || update_name_list_str ||
+               purge_all;
+
+       if (argc || !(list_files || opts_flag))
                usage_with_options(buildid_cache_usage, buildid_cache_options);
 
+       /* -l is exclusive. It can not be used with other options. */
+       if (list_files && opts_flag) {
+               usage_with_options_msg(buildid_cache_usage,
+                       buildid_cache_options, "-l is exclusive.\n");
+       }
+
        if (ns_id > 0)
                nsi = nsinfo__new(ns_id);
 
@@ -366,6 +429,11 @@ int cmd_buildid_cache(int argc, const char **argv)
 
        setup_pager();
 
+       if (list_files) {
+               ret = build_id_cache__show_all();
+               goto out;
+       }
+
        if (add_name_list_str) {
                list = strlist__new(add_name_list_str, NULL);
                if (list) {
@@ -420,6 +488,13 @@ int cmd_buildid_cache(int argc, const char **argv)
                }
        }
 
+       if (purge_all) {
+               if (build_id_cache__purge_all()) {
+                       pr_warning("Couldn't remove some caches. Error: %s.\n",
+                               str_error_r(errno, sbuf, sizeof(sbuf)));
+               }
+       }
+
        if (missing_filename)
                ret = build_id_cache__fprintf_missing(session, stdout);
 
index 40fe919bbcf333cb0ba3ff7fccbe9bc5ac9ebbad..a3b346359ba00c406735d2fb7687bbc02b9db7fb 100644 (file)
@@ -440,9 +440,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
                goto repipe;
        }
 
-       thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, &al);
-
-       if (al.map != NULL) {
+       if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
                if (!al.map->dso->hit) {
                        al.map->dso->hit = 1;
                        if (map__load(al.map) >= 0) {
index bcfb363112d3c999b7b09d2d99b771c04358fdfa..90d1a2305b7288450830042f90f3ee7b66e1290b 100644 (file)
@@ -27,7 +27,7 @@ static int __cmd_kallsyms(int argc, const char **argv)
 
        for (i = 0; i < argc; ++i) {
                struct map *map;
-               struct symbol *symbol = machine__find_kernel_function_by_name(machine, argv[i], &map);
+               struct symbol *symbol = machine__find_kernel_symbol_by_name(machine, argv[i], &map);
 
                if (symbol == NULL) {
                        printf("%s: not found\n", argv[i]);
index ae11e4c3516abd117445fe25086ef2086f6c77ec..54d3f21b0e623eced87ba6b57589b05152150f55 100644 (file)
@@ -1004,7 +1004,7 @@ static void __print_slab_result(struct rb_root *root,
                if (is_caller) {
                        addr = data->call_site;
                        if (!raw_ip)
-                               sym = machine__find_kernel_function(machine, addr, &map);
+                               sym = machine__find_kernel_symbol(machine, addr, &map);
                } else
                        addr = data->ptr;
 
@@ -1068,7 +1068,7 @@ static void __print_page_alloc_result(struct perf_session *session, int n_lines)
                char *caller = buf;
 
                data = rb_entry(next, struct page_stat, node);
-               sym = machine__find_kernel_function(machine, data->callsite, &map);
+               sym = machine__find_kernel_symbol(machine, data->callsite, &map);
                if (sym)
                        caller = sym->name;
                else
@@ -1110,7 +1110,7 @@ static void __print_page_caller_result(struct perf_session *session, int n_lines
                char *caller = buf;
 
                data = rb_entry(next, struct page_stat, node);
-               sym = machine__find_kernel_function(machine, data->callsite, &map);
+               sym = machine__find_kernel_symbol(machine, data->callsite, &map);
                if (sym)
                        caller = sym->name;
                else
index 0f198f6d9b77b049078606493bc8a92019b25f68..ad978e3ee2b8708dddffaf513fc39b393962e8eb 100644 (file)
@@ -194,20 +194,11 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
        return err;
 }
 
-/*
- * Events in data file are not collect in groups, but we still want
- * the group display. Set the artificial group and set the leader's
- * forced_leader flag to notify the display code.
- */
 static void setup_forced_leader(struct report *report,
                                struct perf_evlist *evlist)
 {
-       if (report->group_set && !evlist->nr_groups) {
-               struct perf_evsel *leader = perf_evlist__first(evlist);
-
-               perf_evlist__set_leader(evlist);
-               leader->forced_leader = true;
-       }
+       if (report->group_set)
+               perf_evlist__force_leader(evlist);
 }
 
 static int process_feature_event(struct perf_tool *tool,
@@ -523,12 +514,9 @@ static void report__warn_kptr_restrict(const struct report *rep)
                    "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
                    "can't be resolved.";
 
-               if (kernel_map) {
-                       const struct dso *kdso = kernel_map->dso;
-                       if (!RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION])) {
-                               desc = "If some relocation was applied (e.g. "
-                                      "kexec) symbols may be misresolved.";
-                       }
+               if (kernel_map && map__has_symbols(kernel_map)) {
+                       desc = "If some relocation was applied (e.g. "
+                              "kexec) symbols may be misresolved.";
                }
 
                ui__warning(
@@ -718,10 +706,7 @@ static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
 
 static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp)
 {
-       int printed = 0, i;
-       for (i = 0; i < MAP__NR_TYPES; ++i)
-               printed += maps__fprintf_task(&mg->maps[i], indent, fp);
-       return printed;
+       return maps__fprintf_task(&mg->maps, indent, fp);
 }
 
 static void task__print_level(struct task *task, FILE *fp, int level)
index e0a9845b6cbc57db5e13426d536a18b771202717..cefc8813e91e5a84c3e7ad876827ecfb91dbe212 100644 (file)
@@ -153,8 +153,8 @@ static struct {
                .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
                              PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
                              PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
-                             PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
-                             PERF_OUTPUT_PERIOD,
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
 
                .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
        },
@@ -165,8 +165,9 @@ static struct {
                .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
                              PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
                              PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
-                             PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
-                             PERF_OUTPUT_PERIOD | PERF_OUTPUT_BPF_OUTPUT,
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
+                             PERF_OUTPUT_BPF_OUTPUT,
 
                .invalid_fields = PERF_OUTPUT_TRACE,
        },
@@ -185,10 +186,10 @@ static struct {
                .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
                              PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
                              PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
-                             PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
-                             PERF_OUTPUT_PERIOD |  PERF_OUTPUT_ADDR |
-                             PERF_OUTPUT_DATA_SRC | PERF_OUTPUT_WEIGHT |
-                             PERF_OUTPUT_PHYS_ADDR,
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
+                             PERF_OUTPUT_ADDR | PERF_OUTPUT_DATA_SRC |
+                             PERF_OUTPUT_WEIGHT | PERF_OUTPUT_PHYS_ADDR,
 
                .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
        },
@@ -199,8 +200,8 @@ static struct {
                .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
                              PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
                              PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
-                             PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
-                             PERF_OUTPUT_PERIOD,
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
 
                .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
        },
@@ -211,8 +212,8 @@ static struct {
                .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
                              PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
                              PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
-                             PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
-                             PERF_OUTPUT_SYNTH,
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_SYNTH,
 
                .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
        },
@@ -544,6 +545,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
                        if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) {
                                output[j].fields |= PERF_OUTPUT_IP;
                                output[j].fields |= PERF_OUTPUT_SYM;
+                               output[j].fields |= PERF_OUTPUT_SYMOFFSET;
                                output[j].fields |= PERF_OUTPUT_DSO;
                                set_print_ip_opts(attr);
                                goto out;
@@ -717,8 +719,8 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
                if (PRINT_FIELD(DSO)) {
                        memset(&alf, 0, sizeof(alf));
                        memset(&alt, 0, sizeof(alt));
-                       thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf);
-                       thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
+                       thread__find_map(thread, sample->cpumode, from, &alf);
+                       thread__find_map(thread, sample->cpumode, to, &alt);
                }
 
                printed += fprintf(fp, " 0x%"PRIx64, from);
@@ -764,13 +766,8 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
                from = br->entries[i].from;
                to   = br->entries[i].to;
 
-               thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf);
-               if (alf.map)
-                       alf.sym = map__find_symbol(alf.map, alf.addr);
-
-               thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
-               if (alt.map)
-                       alt.sym = map__find_symbol(alt.map, alt.addr);
+               thread__find_symbol(thread, sample->cpumode, from, &alf);
+               thread__find_symbol(thread, sample->cpumode, to, &alt);
 
                printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
                if (PRINT_FIELD(DSO)) {
@@ -814,12 +811,12 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
                from = br->entries[i].from;
                to   = br->entries[i].to;
 
-               thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf);
-               if (alf.map && !alf.map->dso->adjust_symbols)
+               if (thread__find_map(thread, sample->cpumode, from, &alf) &&
+                   !alf.map->dso->adjust_symbols)
                        from = map__map_ip(alf.map, from);
 
-               thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
-               if (alt.map && !alt.map->dso->adjust_symbols)
+               if (thread__find_map(thread, sample->cpumode, to, &alt) &&
+                   !alt.map->dso->adjust_symbols)
                        to = map__map_ip(alt.map, to);
 
                printed += fprintf(fp, " 0x%"PRIx64, from);
@@ -882,8 +879,7 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
                return 0;
        }
 
-       thread__find_addr_map(thread, *cpumode, MAP__FUNCTION, start, &al);
-       if (!al.map || !al.map->dso) {
+       if (!thread__find_map(thread, *cpumode, start, &al) || !al.map->dso) {
                pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
                return 0;
        }
@@ -933,10 +929,8 @@ static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
 
        memset(&al, 0, sizeof(al));
 
-       thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al);
-       if (!al.map)
-               thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
-                                     addr, &al);
+       thread__find_map(thread, cpumode, addr, &al);
+
        if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end)
                return 0;
 
index f17dc601b0f39aaff4f5c92a27b93b0075df5741..a4f662a462c66c2112db0a1947d21d5d97d2431e 100644 (file)
@@ -164,6 +164,7 @@ static bool                 forever                         = false;
 static bool                    metric_only                     = false;
 static bool                    force_metric_only               = false;
 static bool                    no_merge                        = false;
+static bool                    walltime_run_table              = false;
 static struct timespec         ref_time;
 static struct cpu_map          *aggr_map;
 static aggr_get_id_t           aggr_get_id;
@@ -173,6 +174,7 @@ static const char           *output_name;
 static int                     output_fd;
 static int                     print_free_counters_hint;
 static int                     print_mixed_hw_group_error;
+static u64                     *walltime_run;
 
 struct perf_stat {
        bool                     record;
@@ -569,7 +571,7 @@ static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
        return leader;
 }
 
-static int __run_perf_stat(int argc, const char **argv)
+static int __run_perf_stat(int argc, const char **argv, int run_idx)
 {
        int interval = stat_config.interval;
        int times = stat_config.times;
@@ -752,6 +754,9 @@ static int __run_perf_stat(int argc, const char **argv)
 
        t1 = rdclock();
 
+       if (walltime_run_table)
+               walltime_run[run_idx] = t1 - t0;
+
        update_stats(&walltime_nsecs_stats, t1 - t0);
 
        /*
@@ -766,7 +771,7 @@ static int __run_perf_stat(int argc, const char **argv)
        return WEXITSTATUS(status);
 }
 
-static int run_perf_stat(int argc, const char **argv)
+static int run_perf_stat(int argc, const char **argv, int run_idx)
 {
        int ret;
 
@@ -779,7 +784,7 @@ static int run_perf_stat(int argc, const char **argv)
        if (sync_run)
                sync();
 
-       ret = __run_perf_stat(argc, argv);
+       ret = __run_perf_stat(argc, argv, run_idx);
        if (ret)
                return ret;
 
@@ -1764,19 +1769,67 @@ static void print_header(int argc, const char **argv)
        }
 }
 
+static int get_precision(double num)
+{
+       if (num > 1)
+               return 0;
+
+       return lround(ceil(-log10(num)));
+}
+
+static void print_table(FILE *output, int precision, double avg)
+{
+       char tmp[64];
+       int idx, indent = 0;
+
+       scnprintf(tmp, 64, " %17.*f", precision, avg);
+       while (tmp[indent] == ' ')
+               indent++;
+
+       fprintf(output, "%*s# Table of individual measurements:\n", indent, "");
+
+       for (idx = 0; idx < run_count; idx++) {
+               double run = (double) walltime_run[idx] / NSEC_PER_SEC;
+               int h, n = 1 + abs((int) (100.0 * (run - avg)/run) / 5);
+
+               fprintf(output, " %17.*f (%+.*f) ",
+                       precision, run, precision, run - avg);
+
+               for (h = 0; h < n; h++)
+                       fprintf(output, "#");
+
+               fprintf(output, "\n");
+       }
+
+       fprintf(output, "\n%*s# Final result:\n", indent, "");
+}
+
 static void print_footer(void)
 {
+       double avg = avg_stats(&walltime_nsecs_stats) / NSEC_PER_SEC;
        FILE *output = stat_config.output;
        int n;
 
        if (!null_run)
                fprintf(output, "\n");
-       fprintf(output, " %17.9f seconds time elapsed",
-                       avg_stats(&walltime_nsecs_stats) / NSEC_PER_SEC);
-       if (run_count > 1) {
-               fprintf(output, "                                        ");
-               print_noise_pct(stddev_stats(&walltime_nsecs_stats),
-                               avg_stats(&walltime_nsecs_stats));
+
+       if (run_count == 1) {
+               fprintf(output, " %17.9f seconds time elapsed", avg);
+       } else {
+               double sd = stddev_stats(&walltime_nsecs_stats) / NSEC_PER_SEC;
+               /*
+                * Display at most 2 more significant
+                * digits than the stddev inaccuracy.
+                */
+               int precision = get_precision(sd) + 2;
+
+               if (walltime_run_table)
+                       print_table(output, precision, avg);
+
+               fprintf(output, " %17.*f +- %.*f seconds time elapsed",
+                       precision, avg, precision, sd);
+
+               print_noise_pct(sd, avg);
        }
        fprintf(output, "\n\n");
 
@@ -1952,6 +2005,8 @@ static const struct option stat_options[] = {
                    "be more verbose (show counter open errors, etc)"),
        OPT_INTEGER('r', "repeat", &run_count,
                    "repeat command and print average + stddev (max: 100, forever: 0)"),
+       OPT_BOOLEAN(0, "table", &walltime_run_table,
+                   "display details about each run (only with -r option)"),
        OPT_BOOLEAN('n', "null", &null_run,
                    "null run - dont start any counters"),
        OPT_INCR('d', "detailed", &detailed_run,
@@ -2843,6 +2898,13 @@ int cmd_stat(int argc, const char **argv)
                goto out;
        }
 
+       if (walltime_run_table && run_count <= 1) {
+               fprintf(stderr, "--table is only supported with -r\n");
+               parse_options_usage(stat_usage, stat_options, "r", 1);
+               parse_options_usage(NULL, stat_options, "table", 0);
+               goto out;
+       }
+
        if (output_fd < 0) {
                fprintf(stderr, "argument to --log-fd must be a > 0\n");
                parse_options_usage(stat_usage, stat_options, "log-fd", 0);
@@ -2897,6 +2959,14 @@ int cmd_stat(int argc, const char **argv)
                run_count = 1;
        }
 
+       if (walltime_run_table) {
+               walltime_run = zalloc(run_count * sizeof(walltime_run[0]));
+               if (!walltime_run) {
+                       pr_err("failed to setup -r option");
+                       goto out;
+               }
+       }
+
        if ((stat_config.aggr_mode == AGGR_THREAD) &&
                !target__has_task(&target)) {
                if (!target.system_wide || target.cpu_list) {
@@ -3012,7 +3082,7 @@ int cmd_stat(int argc, const char **argv)
                        fprintf(output, "[ perf stat: executing run #%d ... ]\n",
                                run_idx + 1);
 
-               status = run_perf_stat(argc, argv);
+               status = run_perf_stat(argc, argv, run_idx);
                if (forever && status != -1) {
                        print_counters(NULL, argc, argv);
                        perf_stat__reset_stats();
@@ -3060,6 +3130,8 @@ int cmd_stat(int argc, const char **argv)
        perf_stat__exit_aggr_mode();
        perf_evlist__free_stats(evsel_list);
 out:
+       free(walltime_run);
+
        if (smi_cost && smi_reset)
                sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
 
index 813698a9b8c72d7188683dea53bf838a16c81c9d..a827919c626308beefdf2eb1f2d1b6f69c30edcd 100644 (file)
@@ -533,12 +533,8 @@ static const char *cat_backtrace(union perf_event *event,
                }
 
                tal.filtered = 0;
-               thread__find_addr_location(al.thread, cpumode,
-                                          MAP__FUNCTION, ip, &tal);
-
-               if (tal.sym)
-                       fprintf(f, "..... %016" PRIx64 " %s\n", ip,
-                               tal.sym->name);
+               if (thread__find_symbol(al.thread, cpumode, ip, &tal))
+                       fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name);
                else
                        fprintf(f, "..... %016" PRIx64 "\n", ip);
        }
index f39bd60d2708c7ae0c05a748d719bd89654270ef..7a349fcd38642c9cd9fb4c1f14b53fc45ff23871 100644 (file)
@@ -742,7 +742,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
 "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
 "Check /proc/sys/kernel/kptr_restrict.\n\n"
 "Kernel%s samples will not be resolved.\n",
-                         al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
+                         al.map && map__has_symbols(al.map) ?
                          " modules" : "");
                        if (use_browser <= 0)
                                sleep(5);
@@ -750,7 +750,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
                machine->kptr_restrict_warned = true;
        }
 
-       if (al.sym == NULL) {
+       if (al.sym == NULL && al.map != NULL) {
                const char *msg = "Kernel samples will not be resolved.\n";
                /*
                 * As we do lazy loading of symtabs we only will know if the
@@ -764,8 +764,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
                 * invalid --vmlinux ;-)
                 */
                if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
-                   al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
-                   RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
+                   __map__is_kernel(al.map) && map__has_symbols(al.map)) {
                        if (symbol_conf.vmlinux_name) {
                                char serr[256];
                                dso__strerror_load(al.map->dso, serr, sizeof(serr));
@@ -1265,7 +1264,7 @@ int cmd_top(int argc, const char **argv)
                        .proc_map_timeout    = 500,
                        .overwrite      = 1,
                },
-               .max_stack           = sysctl_perf_event_max_stack,
+               .max_stack           = sysctl__max_stack(),
                .sym_pcnt_filter     = 5,
                .nr_threads_synthesize = UINT_MAX,
        };
index 3ad17ee89403b8e1bbf49fc1e6e077de5bb07cdd..560aed7da36a44df845928d3b46c627e27cc71d3 100644 (file)
@@ -2024,8 +2024,7 @@ static int trace__pgfault(struct trace *trace,
        if (trace->summary_only)
                goto out;
 
-       thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
-                             sample->ip, &al);
+       thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
 
        trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
 
@@ -2037,12 +2036,10 @@ static int trace__pgfault(struct trace *trace,
 
        fprintf(trace->output, "] => ");
 
-       thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE,
-                                  sample->addr, &al);
+       thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
 
        if (!al.map) {
-               thread__find_addr_location(thread, sample->cpumode,
-                                          MAP__FUNCTION, sample->addr, &al);
+               thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
 
                if (al.map)
                        map_type = 'x';
@@ -3165,7 +3162,7 @@ int cmd_trace(int argc, const char **argv)
                mmap_pages_user_set = false;
 
        if (trace.max_stack == UINT_MAX) {
-               trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack;
+               trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
                max_stack_user_set = false;
        }
 
index 9aff89bc75351d13353cd299b324adef99906a1b..10f333e2e82507977e08c2662b8ef62d2450d20e 100755 (executable)
@@ -55,22 +55,26 @@ include/uapi/asm-generic/ioctls.h
 include/uapi/asm-generic/mman-common.h
 '
 
-check () {
-  file=$1
+check_2 () {
+  file1=$1
+  file2=$2
 
   shift
-  opts=
-  while [ -n "$*" ]; do
-    opts="$opts \"$1\""
-    shift
-  done
+  shift
 
-  cmd="diff $opts ../$file ../../$file > /dev/null"
+  cmd="diff $* $file1 $file2 > /dev/null"
 
-  test -f ../../$file &&
+  test -f $file2 &&
   eval $cmd || echo "Warning: Kernel ABI header at 'tools/$file' differs from latest version at '$file'" >&2
 }
 
+check () {
+  file=$1
+
+  shift
+
+  check_2 ../$file ../../$file $*
+}
 
 # Check if we have the kernel headers (tools/perf/../../include), else
 # we're probably on a detached tarball, so no point in trying to check
@@ -83,7 +87,7 @@ for i in $HEADERS; do
 done
 
 # diff with extra ignore lines
-check arch/x86/lib/memcpy_64.S        -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
-check arch/x86/lib/memset_64.S        -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
-check include/uapi/asm-generic/mman.h -I "^#include <\(uapi/\)*asm-generic/mman-common.h>"
-check include/uapi/linux/mman.h       -I "^#include <\(uapi/\)*asm/mman.h>"
+check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
+check arch/x86/lib/memset_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
+check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"'
+check include/uapi/linux/mman.h       '-I "^#include <\(uapi/\)*asm/mman.h>"'
diff --git a/tools/perf/examples/bpf/5sec.c b/tools/perf/examples/bpf/5sec.c
new file mode 100644 (file)
index 0000000..b9c2032
--- /dev/null
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+    Description:
+
+    . Disable strace like syscall tracing (--no-syscalls), or try tracing
+      just some (-e *sleep).
+
+    . Attach a filter function to a kernel function, returning when it should
+      be considered, i.e. appear on the output.
+
+    . Run it system wide, so that any sleep of >= 5 seconds and < than 6
+      seconds gets caught.
+
+    . Ask for callgraphs using DWARF info, so that userspace can be unwound
+
+    . While this is running, run something like "sleep 5s".
+
+    . If we decide to add tv_nsec as well, then it becomes:
+
+      int probe(hrtimer_nanosleep, rqtp->tv_sec rqtp->tv_nsec)(void *ctx, int err, long sec, long nsec)
+
+      I.e. add where it comes from (rqtp->tv_nsec) and where it will be
+      accessible in the function body (nsec)
+
+    # perf trace --no-syscalls -e tools/perf/examples/bpf/5sec.c/call-graph=dwarf/
+         0.000 perf_bpf_probe:func:(ffffffff9811b5f0) tv_sec=5
+                                           hrtimer_nanosleep ([kernel.kallsyms])
+                                           __x64_sys_nanosleep ([kernel.kallsyms])
+                                           do_syscall_64 ([kernel.kallsyms])
+                                           entry_SYSCALL_64 ([kernel.kallsyms])
+                                           __GI___nanosleep (/usr/lib64/libc-2.26.so)
+                                           rpl_nanosleep (/usr/bin/sleep)
+                                           xnanosleep (/usr/bin/sleep)
+                                           main (/usr/bin/sleep)
+                                           __libc_start_main (/usr/lib64/libc-2.26.so)
+                                           _start (/usr/bin/sleep)
+    ^C#
+
+   Copyright (C) 2018 Red Hat, Inc., Arnaldo Carvalho de Melo <acme@redhat.com>
+*/
+
+#include <bpf.h>
+
+int probe(hrtimer_nanosleep, rqtp->tv_sec)(void *ctx, int err, long sec)
+{
+       return sec == 5;
+}
+
+license(GPL);
diff --git a/tools/perf/examples/bpf/empty.c b/tools/perf/examples/bpf/empty.c
new file mode 100644 (file)
index 0000000..3776d26
--- /dev/null
@@ -0,0 +1,3 @@
+#include <bpf.h>
+
+license(GPL);
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
new file mode 100644 (file)
index 0000000..dd764ad
--- /dev/null
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _PERF_BPF_H
+#define _PERF_BPF_H
+#define SEC(NAME) __attribute__((section(NAME),  used))
+
+#define probe(function, vars) \
+       SEC(#function "=" #function " " #vars) function
+
+#define license(name) \
+char _license[] SEC("license") = #name; \
+int _version SEC("version") = LINUX_VERSION_CODE;
+
+#endif /* _PERF_BPF_H */
index 20a08cb323329be20348fbf513091d40780ac12a..51c81509a315ba26ce508a59ebafe5a9179559df 100644 (file)
@@ -238,7 +238,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                        (*argc)--;
                } else if (strstarts(cmd, CMD_DEBUGFS_DIR)) {
                        tracing_path_set(cmd + strlen(CMD_DEBUGFS_DIR));
-                       fprintf(stderr, "dir: %s\n", tracing_path);
+                       fprintf(stderr, "dir: %s\n", tracing_path_mount());
                        if (envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--list-cmds")) {
@@ -421,22 +421,11 @@ void pthread__unblock_sigwinch(void)
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
 }
 
-#ifdef _SC_LEVEL1_DCACHE_LINESIZE
-#define cache_line_size(cacheline_sizep) *cacheline_sizep = sysconf(_SC_LEVEL1_DCACHE_LINESIZE)
-#else
-static void cache_line_size(int *cacheline_sizep)
-{
-       if (sysfs__read_int("devices/system/cpu/cpu0/cache/index0/coherency_line_size", cacheline_sizep))
-               pr_debug("cannot determine cache line size");
-}
-#endif
-
 int main(int argc, const char **argv)
 {
        int err;
        const char *cmd;
        char sbuf[STRERR_BUFSIZE];
-       int value;
 
        /* libsubcmd init */
        exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
@@ -444,13 +433,6 @@ int main(int argc, const char **argv)
 
        /* The page_size is placed in util object. */
        page_size = sysconf(_SC_PAGE_SIZE);
-       cache_line_size(&cacheline_size);
-
-       if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
-               sysctl_perf_event_max_stack = value;
-
-       if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
-               sysctl_perf_event_max_contexts_per_stack = value;
 
        cmd = extract_argv0_path(argv[0]);
        if (!cmd)
@@ -458,15 +440,11 @@ int main(int argc, const char **argv)
 
        srandom(time(NULL));
 
-       perf_config__init();
        err = perf_config(perf_default_config, NULL);
        if (err)
                return err;
        set_buildid_dir(NULL);
 
-       /* get debugfs/tracefs mount point from /proc/mounts */
-       tracing_path_mount();
-
        /*
         * "perf-xxxx" is the same as "perf xxxx", but we obviously:
         *
index cac8f8889bc3b540355963ee793fb0e4aadbfd12..2bde505e2e7ea0c2b1157e2734be89d291351e66 100644 (file)
@@ -654,6 +654,15 @@ static int perf_test__list(int argc, const char **argv)
                        continue;
 
                pr_info("%2d: %s\n", i, t->desc);
+
+               if (t->subtest.get_nr) {
+                       int subn = t->subtest.get_nr();
+                       int subi;
+
+                       for (subi = 0; subi < subn; subi++)
+                               pr_info("%2d:%1d: %s\n", i, subi + 1,
+                                       t->subtest.get_desc(subi));
+               }
        }
 
        perf_test__list_shell(argc, argv, i);
index 99936352df4f0654a025399ececc5dabadf8e159..afa4ce21ba7c5e31e657a66687a7379896a1672f 100644 (file)
@@ -236,14 +236,13 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
 
        pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
 
-       thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al);
-       if (!al.map || !al.map->dso) {
+       if (!thread__find_map(thread, cpumode, addr, &al) || !al.map->dso) {
                if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
                        pr_debug("Hypervisor address can not be resolved - skipping\n");
                        return 0;
                }
 
-               pr_debug("thread__find_addr_map failed\n");
+               pr_debug("thread__find_map failed\n");
                return -1;
        }
 
index f7c5b613d6670f2f4a302742bf95b8cfda8742d0..b889a28fd80b0ce861c59623aeeb2cbb99a246ba 100644 (file)
@@ -131,20 +131,20 @@ struct machine *setup_fake_machine(struct machines *machines)
                        goto out;
 
                /* emulate dso__load() */
-               dso__set_loaded(dso, MAP__FUNCTION);
+               dso__set_loaded(dso);
 
                for (k = 0; k < fake_symbols[i].nr_syms; k++) {
                        struct symbol *sym;
                        struct fake_sym *fsym = &fake_symbols[i].syms[k];
 
                        sym = symbol__new(fsym->start, fsym->length,
-                                         STB_GLOBAL, fsym->name);
+                                         STB_GLOBAL, STT_FUNC, fsym->name);
                        if (sym == NULL) {
                                dso__put(dso);
                                goto out;
                        }
 
-                       symbols__insert(&dso->symbols[MAP__FUNCTION], sym);
+                       symbols__insert(&dso->symbols, sym);
                }
 
                dso__put(dso);
index 868d82b501f4241addc27a2a0aaa6fe11bd1fb6f..b1af2499a3c972a97ddbe766f0fe272716a9fca0 100644 (file)
@@ -188,9 +188,8 @@ static int mmap_events(synth_cb synth)
 
                pr_debug("looking for map %p\n", td->map);
 
-               thread__find_addr_map(thread,
-                                     PERF_RECORD_MISC_USER, MAP__FUNCTION,
-                                     (unsigned long) (td->map + 1), &al);
+               thread__find_map(thread, PERF_RECORD_MISC_USER,
+                                (unsigned long) (td->map + 1), &al);
 
                thread__put(thread);
 
@@ -218,7 +217,7 @@ static int mmap_events(synth_cb synth)
  *   perf_event__synthesize_threads    (global)
  *
  * We test we can find all memory maps via:
- *   thread__find_addr_map
+ *   thread__find_map
  *
  * by using all thread objects.
  */
index 18b06444f230d6563de86a571fac2f86bf4af0b8..b9ebe15afb1384e5a034f486cf145f2dd8f0773b 100644 (file)
@@ -1309,18 +1309,26 @@ static int test__checkevent_config_cache(struct perf_evlist *evlist)
        return 0;
 }
 
+static int test__intel_pt(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+       TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "intel_pt//u") == 0);
+       return 0;
+}
+
 static int count_tracepoints(void)
 {
        struct dirent *events_ent;
        DIR *events_dir;
        int cnt = 0;
 
-       events_dir = opendir(tracing_events_path);
+       events_dir = tracing_events__opendir();
 
        TEST_ASSERT_VAL("Can't open events dir", events_dir);
 
        while ((events_ent = readdir(events_dir))) {
-               char sys_path[PATH_MAX];
+               char *sys_path;
                struct dirent *sys_ent;
                DIR *sys_dir;
 
@@ -1331,8 +1339,8 @@ static int count_tracepoints(void)
                    || !strcmp(events_ent->d_name, "header_page"))
                        continue;
 
-               scnprintf(sys_path, PATH_MAX, "%s/%s",
-                         tracing_events_path, events_ent->d_name);
+               sys_path = get_events_file(events_ent->d_name);
+               TEST_ASSERT_VAL("Can't get sys path", sys_path);
 
                sys_dir = opendir(sys_path);
                TEST_ASSERT_VAL("Can't open sys dir", sys_dir);
@@ -1348,6 +1356,7 @@ static int count_tracepoints(void)
                }
 
                closedir(sys_dir);
+               put_events_file(sys_path);
        }
 
        closedir(events_dir);
@@ -1637,6 +1646,11 @@ static struct evlist_test test__events[] = {
                .check = test__checkevent_config_cache,
                .id    = 51,
        },
+       {
+               .name  = "intel_pt//u",
+               .check = test__intel_pt,
+               .id    = 52,
+       },
 };
 
 static struct evlist_test test__events_pmu[] = {
index ee86473643be59d117184e645f6233df858a5cea..650b208f700fa0aefc003f727432521f7cf324df 100755 (executable)
@@ -16,18 +16,18 @@ nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
 trace_libc_inet_pton_backtrace() {
        idx=0
        expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)"
-       expected[1]=".*inet_pton[[:space:]]\($libc|inlined\)$"
+       expected[1]=".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
        case "$(uname -m)" in
        s390x)
                eventattr='call-graph=dwarf,max-stack=4'
-               expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$"
-               expected[3]="(__GI_)?getaddrinfo[[:space:]]\($libc|inlined\)$"
-               expected[4]="main[[:space:]]\(.*/bin/ping.*\)$"
+               expected[2]="gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
+               expected[3]="(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
+               expected[4]="main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
                ;;
        *)
                eventattr='max-stack=3'
-               expected[2]="getaddrinfo[[:space:]]\($libc\)$"
-               expected[3]=".*\(.*/bin/ping.*\)$"
+               expected[2]="getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$"
+               expected[3]=".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
                ;;
        esac
 
index 1e5adb65632a8def52a4c329e66df0e1b9d05ba1..7691980b7df1603f1391005833b1cbb7c315770e 100644 (file)
@@ -19,8 +19,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
        struct symbol *sym;
        struct map *kallsyms_map, *vmlinux_map, *map;
        struct machine kallsyms, vmlinux;
-       enum map_type type = MAP__FUNCTION;
-       struct maps *maps = &vmlinux.kmaps.maps[type];
+       struct maps *maps = machine__kernel_maps(&vmlinux);
        u64 mem_start, mem_end;
        bool header_printed;
 
@@ -56,7 +55,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
         * be compacted against the list of modules found in the "vmlinux"
         * code and with the one got from /proc/modules from the "kallsyms" code.
         */
-       if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type) <= 0) {
+       if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) {
                pr_debug("dso__load_kallsyms ");
                goto out;
        }
@@ -94,7 +93,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
         * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
         * to fixup the symbols.
         */
-       if (machine__load_vmlinux_path(&vmlinux, type) <= 0) {
+       if (machine__load_vmlinux_path(&vmlinux) <= 0) {
                pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
                err = TEST_SKIP;
                goto out;
@@ -108,7 +107,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
         * in the kallsyms dso. For the ones that are in both, check its names and
         * end addresses too.
         */
-       for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
+       map__for_each_symbol(vmlinux_map, sym, nd) {
                struct symbol *pair, *first_pair;
 
                sym  = rb_entry(nd, struct symbol, rb_node);
@@ -119,8 +118,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
                mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start);
                mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end);
 
-               first_pair = machine__find_kernel_symbol(&kallsyms, type,
-                                                        mem_start, NULL);
+               first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL);
                pair = first_pair;
 
                if (pair && UM(pair->start) == mem_start) {
@@ -149,7 +147,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
                                 */
                                continue;
                        } else {
-                               pair = machine__find_kernel_symbol_by_name(&kallsyms, type, sym->name, NULL);
+                               pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL);
                                if (pair) {
                                        if (UM(pair->start) == mem_start)
                                                goto next_pair;
@@ -183,7 +181,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
                 * so use the short name, less descriptive but the same ("[kernel]" in
                 * both cases.
                 */
-               pair = map_groups__find_by_name(&kallsyms.kmaps, type,
+               pair = map_groups__find_by_name(&kallsyms.kmaps,
                                                (map->dso->kernel ?
                                                        map->dso->short_name :
                                                        map->dso->name));
@@ -206,7 +204,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
                mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
                mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);
 
-               pair = map_groups__find(&kallsyms.kmaps, type, mem_start);
+               pair = map_groups__find(&kallsyms.kmaps, mem_start);
                if (pair == NULL || pair->priv)
                        continue;
 
@@ -228,7 +226,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
 
        header_printed = false;
 
-       maps = &kallsyms.kmaps.maps[type];
+       maps = machine__kernel_maps(&kallsyms);
 
        for (map = maps__first(maps); map; map = map__next(map)) {
                if (!map->priv) {
index 3781d74088a744042ff6dc2d8588154a01110e62..8be40fa903aac431acfae9669a90e6ebce1d2f2d 100644 (file)
@@ -695,6 +695,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
                "O             Bump offset level (jump targets -> +call -> all -> cycle thru)\n"
                "s             Toggle source code view\n"
                "t             Circulate percent, total period, samples view\n"
+               "c             Show min/max cycle\n"
                "/             Search string\n"
                "k             Toggle line numbers\n"
                "P             Print to [symbol_name].annotation file.\n"
@@ -791,6 +792,13 @@ static int annotate_browser__run(struct annotate_browser *browser,
                                notes->options->show_total_period = true;
                        annotation__update_column_widths(notes);
                        continue;
+               case 'c':
+                       if (notes->options->show_minmax_cycle)
+                               notes->options->show_minmax_cycle = false;
+                       else
+                               notes->options->show_minmax_cycle = true;
+                       annotation__update_column_widths(notes);
+                       continue;
                case K_LEFT:
                case K_ESC:
                case 'q':
index e03fa75f108a8b90528c6bb763588619eeb1ab98..5b8b8c637686fddcec8d3db65127a323e9a9529a 100644 (file)
@@ -104,7 +104,7 @@ int map__browse(struct map *map)
 {
        struct map_browser mb = {
                .b = {
-                       .entries = &map->dso->symbols[map->type],
+                       .entries = &map->dso->symbols,
                        .refresh = ui_browser__rb_tree_refresh,
                        .seek    = ui_browser__rb_tree_seek,
                        .write   = map_browser__write,
index 6832fcb2e6ff015be9155d94289594c3fbbad433..c1eb476da91b153f16b65b2b0874536ed26973fb 100644 (file)
@@ -819,8 +819,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                }
 
                if (h->ms.map == NULL && verbose > 1) {
-                       __map_groups__fprintf_maps(h->thread->mg,
-                                                  MAP__FUNCTION, fp);
+                       map_groups__fprintf(h->thread->mg, fp);
                        fprintf(fp, "%.10s end\n", graph_dotted_line);
                }
        }
index 8052373bcd6a0cf1d4c7e136af5d1afc36d10920..5d4c45b7689573befd513071425bf981e15f9c06 100644 (file)
@@ -152,6 +152,8 @@ libperf-y += perf-hooks.o
 libperf-$(CONFIG_CXX) += c++/
 
 CFLAGS_config.o   += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+CFLAGS_llvm-utils.o += -DPERF_INCLUDE_DIR="BUILD_STR($(perf_include_dir_SQ))"
+
 # avoid compiler warnings in 32-bit mode
 CFLAGS_genelf_debug.o  += -Wno-packed
 
index 5d74a30fe00f1f96873c5123e65631c1f8c51d5a..71897689dacf85b3330f531e1c261c361016b9be 100644 (file)
@@ -760,6 +760,15 @@ static int __symbol__account_cycles(struct annotation *notes,
        ch[offset].num_aggr++;
        ch[offset].cycles_aggr += cycles;
 
+       if (cycles > ch[offset].cycles_max)
+               ch[offset].cycles_max = cycles;
+
+       if (ch[offset].cycles_min) {
+               if (cycles && cycles < ch[offset].cycles_min)
+                       ch[offset].cycles_min = cycles;
+       } else
+               ch[offset].cycles_min = cycles;
+
        if (!have_start && ch[offset].have_start)
                return 0;
        if (ch[offset].num) {
@@ -953,8 +962,11 @@ void annotation__compute_ipc(struct annotation *notes, size_t size)
                        if (ch->have_start)
                                annotation__count_and_fill(notes, ch->start, offset, ch);
                        al = notes->offsets[offset];
-                       if (al && ch->num_aggr)
+                       if (al && ch->num_aggr) {
                                al->cycles = ch->cycles_aggr / ch->num_aggr;
+                               al->cycles_max = ch->cycles_max;
+                               al->cycles_min = ch->cycles_min;
+                       }
                        notes->have_cycles = true;
                }
        }
@@ -1953,6 +1965,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
        u64 len;
        int width = symbol_conf.show_total_period ? 12 : 8;
        int graph_dotted_len;
+       char buf[512];
 
        filename = strdup(dso->long_name);
        if (!filename)
@@ -1965,8 +1978,11 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
 
        len = symbol__size(sym);
 
-       if (perf_evsel__is_group_event(evsel))
+       if (perf_evsel__is_group_event(evsel)) {
                width *= evsel->nr_members;
+               perf_evsel__group_desc(evsel, buf, sizeof(buf));
+               evsel_name = buf;
+       }
 
        graph_dotted_len = printf(" %-*.*s|     Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
                                  width, width, symbol_conf.show_total_period ? "Period" :
@@ -2486,13 +2502,38 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
                else
                        obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
 
-               if (al->cycles)
-                       obj__printf(obj, "%*" PRIu64 " ",
+               if (!notes->options->show_minmax_cycle) {
+                       if (al->cycles)
+                               obj__printf(obj, "%*" PRIu64 " ",
                                           ANNOTATION__CYCLES_WIDTH - 1, al->cycles);
-               else if (!show_title)
-                       obj__printf(obj, "%*s", ANNOTATION__CYCLES_WIDTH, " ");
-               else
-                       obj__printf(obj, "%*s ", ANNOTATION__CYCLES_WIDTH - 1, "Cycle");
+                       else if (!show_title)
+                               obj__printf(obj, "%*s",
+                                           ANNOTATION__CYCLES_WIDTH, " ");
+                       else
+                               obj__printf(obj, "%*s ",
+                                           ANNOTATION__CYCLES_WIDTH - 1,
+                                           "Cycle");
+               } else {
+                       if (al->cycles) {
+                               char str[32];
+
+                               scnprintf(str, sizeof(str),
+                                       "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
+                                       al->cycles, al->cycles_min,
+                                       al->cycles_max);
+
+                               obj__printf(obj, "%*s ",
+                                           ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
+                                           str);
+                       } else if (!show_title)
+                               obj__printf(obj, "%*s",
+                                           ANNOTATION__MINMAX_CYCLES_WIDTH,
+                                           " ");
+                       else
+                               obj__printf(obj, "%*s ",
+                                           ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
+                                           "Cycle(min/max)");
+               }
        }
 
        obj__printf(obj, " ");
index f28a9e43421d9c364519db7dd9267a23ea89d8e9..5080b6dd98b8e7edaa4828d63a2f2e0211c6a4b6 100644 (file)
@@ -61,6 +61,7 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
 
 #define ANNOTATION__IPC_WIDTH 6
 #define ANNOTATION__CYCLES_WIDTH 6
+#define ANNOTATION__MINMAX_CYCLES_WIDTH 19
 
 struct annotation_options {
        bool hide_src_code,
@@ -69,7 +70,8 @@ struct annotation_options {
             show_linenr,
             show_nr_jumps,
             show_nr_samples,
-            show_total_period;
+            show_total_period,
+            show_minmax_cycle;
        u8   offset_level;
 };
 
@@ -105,6 +107,8 @@ struct annotation_line {
        int                      jump_sources;
        float                    ipc;
        u64                      cycles;
+       u64                      cycles_max;
+       u64                      cycles_min;
        size_t                   privsize;
        char                    *path;
        u32                      idx;
@@ -186,6 +190,8 @@ struct cyc_hist {
        u64     start;
        u64     cycles;
        u64     cycles_aggr;
+       u64     cycles_max;
+       u64     cycles_min;
        u32     num;
        u32     num_aggr;
        u8      have_start;
@@ -239,6 +245,9 @@ struct annotation {
 
 static inline int annotation__cycles_width(struct annotation *notes)
 {
+       if (notes->have_cycles && notes->options->show_minmax_cycle)
+               return ANNOTATION__IPC_WIDTH + ANNOTATION__MINMAX_CYCLES_WIDTH;
+
        return notes->have_cycles ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0;
 }
 
index 857de69a53610ebfeea697fc61df170986ef53ff..d056447520a234cf759ac643fc2dd55b8eec47d9 100644 (file)
@@ -1679,7 +1679,7 @@ struct sym_args {
 static bool kern_sym_match(struct sym_args *args, const char *name, char type)
 {
        /* A function with the same name, and global or the n'th found or any */
-       return symbol_type__is_a(type, MAP__FUNCTION) &&
+       return kallsyms__is_function(type) &&
               !strcmp(name, args->name) &&
               ((args->global && isupper(type)) ||
                (args->selected && ++(args->cnt) == args->idx) ||
@@ -1784,7 +1784,7 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
 {
        struct sym_args *args = arg;
 
-       if (!symbol_type__is_a(type, MAP__FUNCTION))
+       if (!kallsyms__is_function(type))
                return 0;
 
        if (!args->started) {
@@ -1915,7 +1915,7 @@ static void print_duplicate_syms(struct dso *dso, const char *sym_name)
 
        pr_err("Multiple symbols with name '%s'\n", sym_name);
 
-       sym = dso__first_symbol(dso, MAP__FUNCTION);
+       sym = dso__first_symbol(dso);
        while (sym) {
                if (dso_sym_match(sym, sym_name, &cnt, -1)) {
                        pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
@@ -1945,7 +1945,7 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
        *start = 0;
        *size = 0;
 
-       sym = dso__first_symbol(dso, MAP__FUNCTION);
+       sym = dso__first_symbol(dso);
        while (sym) {
                if (*start) {
                        if (!*size)
@@ -1972,8 +1972,8 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
 
 static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
 {
-       struct symbol *first_sym = dso__first_symbol(dso, MAP__FUNCTION);
-       struct symbol *last_sym = dso__last_symbol(dso, MAP__FUNCTION);
+       struct symbol *first_sym = dso__first_symbol(dso);
+       struct symbol *last_sym = dso__last_symbol(dso);
 
        if (!first_sym || !last_sym) {
                pr_err("Failed to determine filter for %s\nNo symbols found.\n",
index 537eadd81914f6ae4c5cd64c9fd5f109fe532b50..04b1d53e4bf9a7d3606169bbb25cdb1a5d059600 100644 (file)
@@ -47,9 +47,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
                return -1;
        }
 
-       thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, &al);
-
-       if (al.map != NULL)
+       if (thread__find_map(thread, sample->cpumode, sample->ip, &al))
                al.map->dso->hit = 1;
 
        thread__put(thread);
index 84eb9393c7db1bf304b77d9984a11d5aea6a673d..5ac157056cdfcc1f536b68927500d5893abd63bc 100644 (file)
@@ -707,6 +707,14 @@ struct perf_config_set *perf_config_set__new(void)
        return set;
 }
 
+static int perf_config__init(void)
+{
+       if (config_set == NULL)
+               config_set = perf_config_set__new();
+
+       return config_set == NULL;
+}
+
 int perf_config(config_fn_t fn, void *data)
 {
        int ret = 0;
@@ -714,7 +722,7 @@ int perf_config(config_fn_t fn, void *data)
        struct perf_config_section *section;
        struct perf_config_item *item;
 
-       if (config_set == NULL)
+       if (config_set == NULL && perf_config__init())
                return -1;
 
        perf_config_set__for_each_entry(config_set, section, item) {
@@ -735,12 +743,6 @@ int perf_config(config_fn_t fn, void *data)
        return ret;
 }
 
-void perf_config__init(void)
-{
-       if (config_set == NULL)
-               config_set = perf_config_set__new();
-}
-
 void perf_config__exit(void)
 {
        perf_config_set__delete(config_set);
index baf82bf227acbadfb1ca547584bd123e121b3f14..bd0a5897c76a5daad5f68f6b561d1a744f029b76 100644 (file)
@@ -38,7 +38,6 @@ struct perf_config_set *perf_config_set__new(void);
 void perf_config_set__delete(struct perf_config_set *set);
 int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
                             const char *var, const char *value);
-void perf_config__init(void);
 void perf_config__exit(void);
 void perf_config__refresh(void);
 
index bf16dc9ee507d98bbb8a762f4f4221bbe7733548..822ba915d144de828347936bb2c1514943266f36 100644 (file)
@@ -270,9 +270,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
                thread = etmq->etm->unknown_thread;
        }
 
-       thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address, &al);
-
-       if (!al.map || !al.map->dso)
+       if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
                return 0;
 
        if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
index b0c2b5c5d3375fc919f24f92a9e4b7eba11c084d..7123746edcf4fde778d5b2d8add43201d22f6d72 100644 (file)
@@ -247,9 +247,9 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
                *dso_db_id = dso->db_id;
 
                if (!al->sym) {
-                       al->sym = symbol__new(al->addr, 0, 0, "unknown");
+                       al->sym = symbol__new(al->addr, 0, 0, 0, "unknown");
                        if (al->sym)
-                               dso__insert_symbol(dso, al->map->type, al->sym);
+                               dso__insert_symbol(dso, al->sym);
                }
 
                if (al->sym) {
@@ -315,8 +315,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
                al.addr = node->ip;
 
                if (al.map && !al.sym)
-                       al.sym = dso__find_symbol(al.map->dso, MAP__FUNCTION,
-                                                 al.addr);
+                       al.sym = dso__find_symbol(al.map->dso, al.addr);
 
                db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset);
 
index 36ef45b2e89d53b674374fcf30325b49b0a34bfc..cdfc2e5f55f513b063841a7dba636458df7f5f81 100644 (file)
@@ -1014,7 +1014,7 @@ struct map *dso__new_map(const char *name)
        struct dso *dso = dso__new(name);
 
        if (dso)
-               map = map__new2(0, dso, MAP__FUNCTION);
+               map = map__new2(0, dso);
 
        return map;
 }
@@ -1176,19 +1176,19 @@ int dso__name_len(const struct dso *dso)
        return dso->short_name_len;
 }
 
-bool dso__loaded(const struct dso *dso, enum map_type type)
+bool dso__loaded(const struct dso *dso)
 {
-       return dso->loaded & (1 << type);
+       return dso->loaded;
 }
 
-bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
+bool dso__sorted_by_name(const struct dso *dso)
 {
-       return dso->sorted_by_name & (1 << type);
+       return dso->sorted_by_name;
 }
 
-void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
+void dso__set_sorted_by_name(struct dso *dso)
 {
-       dso->sorted_by_name |= (1 << type);
+       dso->sorted_by_name = true;
 }
 
 struct dso *dso__new(const char *name)
@@ -1196,12 +1196,10 @@ struct dso *dso__new(const char *name)
        struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
 
        if (dso != NULL) {
-               int i;
                strcpy(dso->name, name);
                dso__set_long_name(dso, dso->name, false);
                dso__set_short_name(dso, dso->name, false);
-               for (i = 0; i < MAP__NR_TYPES; ++i)
-                       dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
+               dso->symbols = dso->symbol_names = RB_ROOT;
                dso->data.cache = RB_ROOT;
                dso->inlined_nodes = RB_ROOT;
                dso->srclines = RB_ROOT;
@@ -1231,8 +1229,6 @@ struct dso *dso__new(const char *name)
 
 void dso__delete(struct dso *dso)
 {
-       int i;
-
        if (!RB_EMPTY_NODE(&dso->rb_node))
                pr_err("DSO %s is still in rbtree when being deleted!\n",
                       dso->long_name);
@@ -1240,8 +1236,7 @@ void dso__delete(struct dso *dso)
        /* free inlines first, as they reference symbols */
        inlines__tree_delete(&dso->inlined_nodes);
        srcline__tree_delete(&dso->srclines);
-       for (i = 0; i < MAP__NR_TYPES; ++i)
-               symbols__delete(&dso->symbols[i]);
+       symbols__delete(&dso->symbols);
 
        if (dso->short_name_allocated) {
                zfree((char **)&dso->short_name);
@@ -1451,9 +1446,7 @@ size_t __dsos__fprintf(struct list_head *head, FILE *fp)
        size_t ret = 0;
 
        list_for_each_entry(pos, head, node) {
-               int i;
-               for (i = 0; i < MAP__NR_TYPES; ++i)
-                       ret += dso__fprintf(pos, i, fp);
+               ret += dso__fprintf(pos, fp);
        }
 
        return ret;
@@ -1467,18 +1460,17 @@ size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
        return fprintf(fp, "%s", sbuild_id);
 }
 
-size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
+size_t dso__fprintf(struct dso *dso, FILE *fp)
 {
        struct rb_node *nd;
        size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
 
        if (dso->short_name != dso->long_name)
                ret += fprintf(fp, "%s, ", dso->long_name);
-       ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
-                      dso__loaded(dso, type) ? "" : "NOT ");
+       ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
        ret += dso__fprintf_buildid(dso, fp);
        ret += fprintf(fp, ")\n");
-       for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&dso->symbols); nd; nd = rb_next(nd)) {
                struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
                ret += symbol__fprintf(pos, fp);
        }
index c229dbe0277a2a844fca644fe7c24590697b7bb3..ef69de2e69ea74bd1e20ae590cc19ea9d4d817ed 100644 (file)
@@ -140,14 +140,14 @@ struct dso {
        struct list_head node;
        struct rb_node   rb_node;       /* rbtree node sorted by long name */
        struct rb_root   *root;         /* root of rbtree that rb_node is in */
-       struct rb_root   symbols[MAP__NR_TYPES];
-       struct rb_root   symbol_names[MAP__NR_TYPES];
+       struct rb_root   symbols;
+       struct rb_root   symbol_names;
        struct rb_root   inlined_nodes;
        struct rb_root   srclines;
        struct {
                u64             addr;
                struct symbol   *symbol;
-       } last_find_result[MAP__NR_TYPES];
+       } last_find_result;
        void             *a2l;
        char             *symsrc_filename;
        unsigned int     a2l_fails;
@@ -164,8 +164,8 @@ struct dso {
        u8               short_name_allocated:1;
        u8               long_name_allocated:1;
        u8               is_64_bit:1;
-       u8               sorted_by_name;
-       u8               loaded;
+       bool             sorted_by_name;
+       bool             loaded;
        u8               rel;
        u8               build_id[BUILD_ID_SIZE];
        u64              text_offset;
@@ -202,14 +202,13 @@ struct dso {
  * @dso: the 'struct dso *' in which symbols itereated
  * @pos: the 'struct symbol *' to use as a loop cursor
  * @n: the 'struct rb_node *' to use as a temporary storage
- * @type: the 'enum map_type' type of symbols
  */
-#define dso__for_each_symbol(dso, pos, n, type)        \
-       symbols__for_each_entry(&(dso)->symbols[(type)], pos, n)
+#define dso__for_each_symbol(dso, pos, n)      \
+       symbols__for_each_entry(&(dso)->symbols, pos, n)
 
-static inline void dso__set_loaded(struct dso *dso, enum map_type type)
+static inline void dso__set_loaded(struct dso *dso)
 {
-       dso->loaded |= (1 << type);
+       dso->loaded = true;
 }
 
 struct dso *dso__new(const char *name);
@@ -231,11 +230,16 @@ static inline void __dso__zput(struct dso **dso)
 
 #define dso__zput(dso) __dso__zput(&dso)
 
-bool dso__loaded(const struct dso *dso, enum map_type type);
+bool dso__loaded(const struct dso *dso);
 
-bool dso__sorted_by_name(const struct dso *dso, enum map_type type);
-void dso__set_sorted_by_name(struct dso *dso, enum map_type type);
-void dso__sort_by_name(struct dso *dso, enum map_type type);
+static inline bool dso__has_symbols(const struct dso *dso)
+{
+       return !RB_EMPTY_ROOT(&dso->symbols);
+}
+
+bool dso__sorted_by_name(const struct dso *dso);
+void dso__set_sorted_by_name(struct dso *dso);
+void dso__sort_by_name(struct dso *dso);
 
 void dso__set_build_id(struct dso *dso, void *build_id);
 bool dso__build_id_equal(const struct dso *dso, u8 *build_id);
@@ -349,9 +353,8 @@ size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
 size_t __dsos__fprintf(struct list_head *head, FILE *fp);
 
 size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
-size_t dso__fprintf_symbols_by_name(struct dso *dso,
-                                   enum map_type type, FILE *fp);
-size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
+size_t dso__fprintf_symbols_by_name(struct dso *dso, FILE *fp);
+size_t dso__fprintf(struct dso *dso, FILE *fp);
 
 static inline bool dso__is_vmlinux(struct dso *dso)
 {
index 4c842762e3f2d483e2f7f29a3fac640ce89602ea..59f38c7693f8648b7ce494edc864f09a0f6b09d5 100644 (file)
@@ -93,6 +93,37 @@ int perf_env__read_cpu_topology_map(struct perf_env *env)
        return 0;
 }
 
+static int perf_env__read_arch(struct perf_env *env)
+{
+       struct utsname uts;
+
+       if (env->arch)
+               return 0;
+
+       if (!uname(&uts))
+               env->arch = strdup(uts.machine);
+
+       return env->arch ? 0 : -ENOMEM;
+}
+
+static int perf_env__read_nr_cpus_avail(struct perf_env *env)
+{
+       if (env->nr_cpus_avail == 0)
+               env->nr_cpus_avail = cpu__max_present_cpu();
+
+       return env->nr_cpus_avail ? 0 : -ENOENT;
+}
+
+const char *perf_env__raw_arch(struct perf_env *env)
+{
+       return env && !perf_env__read_arch(env) ? env->arch : "unknown";
+}
+
+int perf_env__nr_cpus_avail(struct perf_env *env)
+{
+       return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
+}
+
 void cpu_cache_level__free(struct cpu_cache_level *cache)
 {
        free(cache->type);
index c4ef2e523367a26e454a22a7840853a70e5e51e9..1f3ccc36853030bc8ea58d35ea19bba8e225f732 100644 (file)
@@ -76,4 +76,7 @@ int perf_env__read_cpu_topology_map(struct perf_env *env);
 void cpu_cache_level__free(struct cpu_cache_level *cache);
 
 const char *perf_env__arch(struct perf_env *env);
+const char *perf_env__raw_arch(struct perf_env *env);
+int perf_env__nr_cpus_avail(struct perf_env *env);
+
 #endif /* __PERF_ENV_H */
index 98ff3a6a3d507f9ba41079b19423dbf49aa013b3..0c8ecf0c78a40ac1088d40742d371cae7fa3d6e7 100644 (file)
@@ -88,10 +88,10 @@ static const char *perf_ns__name(unsigned int id)
        return perf_ns__names[id];
 }
 
-static int perf_tool__process_synth_event(struct perf_tool *tool,
-                                         union perf_event *event,
-                                         struct machine *machine,
-                                         perf_event__handler_t process)
+int perf_tool__process_synth_event(struct perf_tool *tool,
+                                  union perf_event *event,
+                                  struct machine *machine,
+                                  perf_event__handler_t process)
 {
        struct perf_sample synth_sample = {
        .pid       = -1,
@@ -464,8 +464,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
 {
        int rc = 0;
        struct map *pos;
-       struct map_groups *kmaps = &machine->kmaps;
-       struct maps *maps = &kmaps->maps[MAP__FUNCTION];
+       struct maps *maps = machine__kernel_maps(machine);
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          machine->id_hdr_size));
        if (event == NULL) {
@@ -488,7 +487,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
        for (pos = maps__first(maps); pos; pos = map__next(pos)) {
                size_t size;
 
-               if (__map__is_kernel(pos))
+               if (!__map__is_kmodule(pos))
                        continue;
 
                size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
@@ -869,7 +868,7 @@ static int find_symbol_cb(void *arg, const char *name, char type,
         * Must be a function or at least an alias, as in PARISC64, where "_text" is
         * an 'A' to the same address as "_stext".
         */
-       if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
+       if (!(kallsyms__is_function(type) ||
              type == 'A') || strcmp(name, args->name))
                return 0;
 
@@ -889,9 +888,16 @@ int kallsyms__get_function_start(const char *kallsyms_filename,
        return 0;
 }
 
-int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
-                                      perf_event__handler_t process,
-                                      struct machine *machine)
+int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
+                                             perf_event__handler_t process __maybe_unused,
+                                             struct machine *machine __maybe_unused)
+{
+       return 0;
+}
+
+static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+                                               perf_event__handler_t process,
+                                               struct machine *machine)
 {
        size_t size;
        struct map *map = machine__kernel_map(machine);
@@ -944,6 +950,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
        return err;
 }
 
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+                                      perf_event__handler_t process,
+                                      struct machine *machine)
+{
+       int err;
+
+       err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
+       if (err < 0)
+               return err;
+
+       return perf_event__synthesize_extra_kmaps(tool, process, machine);
+}
+
 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
@@ -1489,9 +1508,8 @@ int perf_event__process(struct perf_tool *tool __maybe_unused,
        return machine__process_event(machine, event, sample);
 }
 
-void thread__find_addr_map(struct thread *thread, u8 cpumode,
-                          enum map_type type, u64 addr,
-                          struct addr_location *al)
+struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
+                            struct addr_location *al)
 {
        struct map_groups *mg = thread->mg;
        struct machine *machine = mg->machine;
@@ -1505,7 +1523,7 @@ void thread__find_addr_map(struct thread *thread, u8 cpumode,
 
        if (machine == NULL) {
                al->map = NULL;
-               return;
+               return NULL;
        }
 
        if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
@@ -1533,10 +1551,10 @@ void thread__find_addr_map(struct thread *thread, u8 cpumode,
                        !perf_host)
                        al->filtered |= (1 << HIST_FILTER__HOST);
 
-               return;
+               return NULL;
        }
 try_again:
-       al->map = map_groups__find(mg, type, al->addr);
+       al->map = map_groups__find(mg, al->addr);
        if (al->map == NULL) {
                /*
                 * If this is outside of all known maps, and is a negative
@@ -1563,17 +1581,17 @@ void thread__find_addr_map(struct thread *thread, u8 cpumode,
                        map__load(al->map);
                al->addr = al->map->map_ip(al->map, al->addr);
        }
+
+       return al->map;
 }
 
-void thread__find_addr_location(struct thread *thread,
-                               u8 cpumode, enum map_type type, u64 addr,
-                               struct addr_location *al)
+struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
+                                  u64 addr, struct addr_location *al)
 {
-       thread__find_addr_map(thread, cpumode, type, addr, al);
-       if (al->map != NULL)
+       al->sym = NULL;
+       if (thread__find_map(thread, cpumode, addr, al))
                al->sym = map__find_symbol(al->map, al->addr);
-       else
-               al->sym = NULL;
+       return al->sym;
 }
 
 /*
@@ -1590,7 +1608,7 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
                return -1;
 
        dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
-       thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
+       thread__find_map(thread, sample->cpumode, sample->ip, al);
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
                        al->level == 'H' ? "[hypervisor]" : "<not found>");
@@ -1669,10 +1687,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
 void thread__resolve(struct thread *thread, struct addr_location *al,
                     struct perf_sample *sample)
 {
-       thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
-       if (!al->map)
-               thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
-                                     sample->addr, al);
+       thread__find_map(thread, sample->cpumode, sample->addr, al);
 
        al->cpu = sample->cpu;
        al->sym = NULL;
index 0f794744919c0e947d09999d1cc7e685e6c42a81..bfa60bcafbde8cdb776a9d653f51f4101ef16ea6 100644 (file)
@@ -750,6 +750,10 @@ int perf_event__process_exit(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
+int perf_tool__process_synth_event(struct perf_tool *tool,
+                                  union perf_event *event,
+                                  struct machine *machine,
+                                  perf_event__handler_t process);
 int perf_event__process(struct perf_tool *tool,
                        union perf_event *event,
                        struct perf_sample *sample,
@@ -796,6 +800,10 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       bool mmap_data,
                                       unsigned int proc_map_timeout);
 
+int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
+                                      perf_event__handler_t process,
+                                      struct machine *machine);
+
 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
index a59281d6436884e918147222e60ee0ee5b84c0f3..e7a4b31a84fb5f2316549d7ab31378c46589c73a 100644 (file)
@@ -1795,3 +1795,18 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
 
        return true;
 }
+
+/*
+ * Events in data file are not collect in groups, but we still want
+ * the group display. Set the artificial group and set the leader's
+ * forced_leader flag to notify the display code.
+ */
+void perf_evlist__force_leader(struct perf_evlist *evlist)
+{
+       if (!evlist->nr_groups) {
+               struct perf_evsel *leader = perf_evlist__first(evlist);
+
+               perf_evlist__set_leader(evlist);
+               leader->forced_leader = true;
+       }
+}
index 6c41b2f7871396ba9e43708c8b1835c01fea72d4..dc66436add98a3c795efa3ddf0889f09f1d7abe3 100644 (file)
@@ -309,4 +309,7 @@ struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                            union perf_event *event);
 
 bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
+
+void perf_evlist__force_leader(struct perf_evlist *evlist);
+
 #endif /* __PERF_EVLIST_H */
index 4cd2cf93f7263e97307b69ec65d0e5c752be51fa..150db5ed7400e3ea374477e233b5255dff49215d 100644 (file)
@@ -2862,7 +2862,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                        return scnprintf(msg, size,
                                         "Not enough memory to setup event with callchain.\n"
                                         "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
-                                        "Hint: Current value: %d", sysctl_perf_event_max_stack);
+                                        "Hint: Current value: %d", sysctl__max_stack());
                break;
        case ENODEV:
                if (target->cpu_list)
index c540d47583e763292f3392744c51a00319d56586..aafbe54fd3faee74100f91e8381aadc687cc7401 100644 (file)
@@ -114,7 +114,7 @@ gen_build_id(struct buildid_note *note,
 
        fd = open("/dev/urandom", O_RDONLY);
        if (fd == -1)
-               err(1, "cannot access /dev/urandom for builid");
+               err(1, "cannot access /dev/urandom for buildid");
 
        sret = read(fd, note->build_id, sz);
 
index 72db2744876d9a5993b8b58b9c4b9c5f7b397f2b..7f0c83b6332bfd94ca92eac2723c700421cf119a 100644 (file)
@@ -335,8 +335,7 @@ static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
        if (!thread)
                return -1;
 
-       thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
-       if (!al.map || !al.map->dso)
+       if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
                goto out_put;
 
        len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
index 0effaff57020b295c8097ff884a9bc3066b95edb..492986a25ef66a78cfda4e4a601f2e4cae78b9d4 100644 (file)
@@ -442,8 +442,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
        }
 
        while (1) {
-               thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al);
-               if (!al.map || !al.map->dso)
+               if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
                        return -EINVAL;
 
                if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
@@ -596,8 +595,7 @@ static int __intel_pt_pgd_ip(uint64_t ip, void *data)
        if (!thread)
                return -EINVAL;
 
-       thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
-       if (!al.map || !al.map->dso)
+       if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
                return -EINVAL;
 
        offset = al.map->map_ip(al.map, ip);
@@ -1565,7 +1563,7 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
        if (map__load(map))
                return 0;
 
-       start = dso__first_symbol(map->dso, MAP__FUNCTION);
+       start = dso__first_symbol(map->dso);
 
        for (sym = start; sym; sym = dso__next_symbol(sym)) {
                if (sym->binding == STB_GLOBAL &&
index 1cca0a2fa641b8acaf64e7d5f4a2cfa57dee6de4..976e658e38dce762163bb583f1ab02b39231a742 100644 (file)
 #include "config.h"
 #include "util.h"
 #include <sys/wait.h>
+#include <subcmd/exec-cmd.h>
 
 #define CLANG_BPF_CMD_DEFAULT_TEMPLATE                         \
                "$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\
                "-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE "     \
-               "$CLANG_OPTIONS $KERNEL_INC_OPTIONS "           \
+               "$CLANG_OPTIONS $KERNEL_INC_OPTIONS $PERF_BPF_INC_OPTIONS " \
                "-Wno-unused-value -Wno-pointer-sign "          \
                "-working-directory $WORKING_DIR "              \
                "-c \"$CLANG_SOURCE\" -target bpf -O2 -o -"
@@ -212,7 +213,7 @@ version_notice(void)
 "     \t\thttp://llvm.org/apt\n\n"
 "     \tIf you are using old version of clang, change 'clang-bpf-cmd-template'\n"
 "     \toption in [llvm] section of ~/.perfconfig to:\n\n"
-"     \t  \"$CLANG_EXEC $CLANG_OPTIONS $KERNEL_INC_OPTIONS \\\n"
+"     \t  \"$CLANG_EXEC $CLANG_OPTIONS $KERNEL_INC_OPTIONS $PERF_BPF_INC_OPTIONS \\\n"
 "     \t     -working-directory $WORKING_DIR -c $CLANG_SOURCE \\\n"
 "     \t     -emit-llvm -o - | /path/to/llc -march=bpf -filetype=obj -o -\"\n"
 "     \t(Replace /path/to/llc with path to your llc)\n\n"
@@ -431,9 +432,11 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
        const char *clang_opt = llvm_param.clang_opt;
        char clang_path[PATH_MAX], abspath[PATH_MAX], nr_cpus_avail_str[64];
        char serr[STRERR_BUFSIZE];
-       char *kbuild_dir = NULL, *kbuild_include_opts = NULL;
+       char *kbuild_dir = NULL, *kbuild_include_opts = NULL,
+            *perf_bpf_include_opts = NULL;
        const char *template = llvm_param.clang_bpf_cmd_template;
-       char *command_echo, *command_out;
+       char *command_echo = NULL, *command_out;
+       char *perf_include_dir = system_path(PERF_INCLUDE_DIR);
 
        if (path[0] != '-' && realpath(path, abspath) == NULL) {
                err = errno;
@@ -471,12 +474,14 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
 
        snprintf(linux_version_code_str, sizeof(linux_version_code_str),
                 "0x%x", kernel_version);
-
+       if (asprintf(&perf_bpf_include_opts, "-I%s/bpf", perf_include_dir) < 0)
+               goto errout;
        force_set_env("NR_CPUS", nr_cpus_avail_str);
        force_set_env("LINUX_VERSION_CODE", linux_version_code_str);
        force_set_env("CLANG_EXEC", clang_path);
        force_set_env("CLANG_OPTIONS", clang_opt);
        force_set_env("KERNEL_INC_OPTIONS", kbuild_include_opts);
+       force_set_env("PERF_BPF_INC_OPTIONS", perf_bpf_include_opts);
        force_set_env("WORKING_DIR", kbuild_dir ? : ".");
 
        /*
@@ -512,6 +517,8 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
        free(command_out);
        free(kbuild_dir);
        free(kbuild_include_opts);
+       free(perf_bpf_include_opts);
+       free(perf_include_dir);
 
        if (!p_obj_buf)
                free(obj_buf);
@@ -526,6 +533,8 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
        free(kbuild_dir);
        free(kbuild_include_opts);
        free(obj_buf);
+       free(perf_bpf_include_opts);
+       free(perf_include_dir);
        if (p_obj_buf)
                *p_obj_buf = NULL;
        if (p_obj_buf_sz)
index 32d50492505d4b5018dc3fe3794e284e519f8346..e7b4a8b513f2a5be5c3ea4ce7df70a8f626c3394 100644 (file)
@@ -24,6 +24,7 @@
 
 #include "sane_ctype.h"
 #include <symbol/kallsyms.h>
+#include <linux/mman.h>
 
 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
 
@@ -81,8 +82,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
        machine->kptr_restrict_warned = false;
        machine->comm_exec = false;
        machine->kernel_start = 0;
-
-       memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));
+       machine->vmlinux_map = NULL;
 
        machine->root_dir = strdup(root_dir);
        if (machine->root_dir == NULL)
@@ -137,13 +137,11 @@ struct machine *machine__new_kallsyms(void)
        struct machine *machine = machine__new_host();
        /*
         * FIXME:
-        * 1) MAP__FUNCTION will go away when we stop loading separate maps for
-        *    functions and data objects.
-        * 2) We should switch to machine__load_kallsyms(), i.e. not explicitely
+        * 1) We should switch to machine__load_kallsyms(), i.e. not explicitely
         *    ask for not using the kcore parsing code, once this one is fixed
         *    to create a map per module.
         */
-       if (machine && machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION) <= 0) {
+       if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
                machine__delete(machine);
                machine = NULL;
        }
@@ -673,8 +671,7 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
        if (kmod_path__parse_name(&m, filename))
                return NULL;
 
-       map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
-                                      m.name);
+       map = map_groups__find_by_name(&machine->kmaps, m.name);
        if (map) {
                /*
                 * If the map's dso is an offline module, give dso__load()
@@ -689,7 +686,7 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
        if (dso == NULL)
                goto out;
 
-       map = map__new2(start, dso, MAP__FUNCTION);
+       map = map__new2(start, dso);
        if (map == NULL)
                goto out;
 
@@ -810,8 +807,8 @@ struct process_args {
        u64 start;
 };
 
-static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
-                                          size_t bufsz)
+void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+                                   size_t bufsz)
 {
        if (machine__is_default_guest(machine))
                scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
@@ -854,65 +851,171 @@ static int machine__get_running_kernel_start(struct machine *machine,
        return 0;
 }
 
+int machine__create_extra_kernel_map(struct machine *machine,
+                                    struct dso *kernel,
+                                    struct extra_kernel_map *xm)
+{
+       struct kmap *kmap;
+       struct map *map;
+
+       map = map__new2(xm->start, kernel);
+       if (!map)
+               return -1;
+
+       map->end   = xm->end;
+       map->pgoff = xm->pgoff;
+
+       kmap = map__kmap(map);
+
+       kmap->kmaps = &machine->kmaps;
+       strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
+
+       map_groups__insert(&machine->kmaps, map);
+
+       pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
+                 kmap->name, map->start, map->end);
+
+       map__put(map);
+
+       return 0;
+}
+
+static u64 find_entry_trampoline(struct dso *dso)
+{
+       /* Duplicates are removed so lookup all aliases */
+       const char *syms[] = {
+               "_entry_trampoline",
+               "__entry_trampoline_start",
+               "entry_SYSCALL_64_trampoline",
+       };
+       struct symbol *sym = dso__first_symbol(dso);
+       unsigned int i;
+
+       for (; sym; sym = dso__next_symbol(sym)) {
+               if (sym->binding != STB_GLOBAL)
+                       continue;
+               for (i = 0; i < ARRAY_SIZE(syms); i++) {
+                       if (!strcmp(sym->name, syms[i]))
+                               return sym->start;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * These values can be used for kernels that do not have symbols for the entry
+ * trampolines in kallsyms.
+ */
+#define X86_64_CPU_ENTRY_AREA_PER_CPU  0xfffffe0000000000ULL
+#define X86_64_CPU_ENTRY_AREA_SIZE     0x2c000
+#define X86_64_ENTRY_TRAMPOLINE                0x6000
+
+/* Map x86_64 PTI entry trampolines */
+int machine__map_x86_64_entry_trampolines(struct machine *machine,
+                                         struct dso *kernel)
+{
+       struct map_groups *kmaps = &machine->kmaps;
+       struct maps *maps = &kmaps->maps;
+       int nr_cpus_avail, cpu;
+       bool found = false;
+       struct map *map;
+       u64 pgoff;
+
+       /*
+        * In the vmlinux case, pgoff is a virtual address which must now be
+        * mapped to a vmlinux offset.
+        */
+       for (map = maps__first(maps); map; map = map__next(map)) {
+               struct kmap *kmap = __map__kmap(map);
+               struct map *dest_map;
+
+               if (!kmap || !is_entry_trampoline(kmap->name))
+                       continue;
+
+               dest_map = map_groups__find(kmaps, map->pgoff);
+               if (dest_map != map)
+                       map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
+               found = true;
+       }
+       if (found || machine->trampolines_mapped)
+               return 0;
+
+       pgoff = find_entry_trampoline(kernel);
+       if (!pgoff)
+               return 0;
+
+       nr_cpus_avail = machine__nr_cpus_avail(machine);
+
+       /* Add a 1 page map for each CPU's entry trampoline */
+       for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
+               u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
+                        cpu * X86_64_CPU_ENTRY_AREA_SIZE +
+                        X86_64_ENTRY_TRAMPOLINE;
+               struct extra_kernel_map xm = {
+                       .start = va,
+                       .end   = va + page_size,
+                       .pgoff = pgoff,
+               };
+
+               strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
+
+               if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
+                       return -1;
+       }
+
+       machine->trampolines_mapped = nr_cpus_avail;
+
+       return 0;
+}
+
+int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
+                                            struct dso *kernel __maybe_unused)
+{
+       return 0;
+}
+
 static int
 __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
 {
-       int type;
+       struct kmap *kmap;
+       struct map *map;
 
        /* In case of renewal the kernel map, destroy previous one */
        machine__destroy_kernel_maps(machine);
 
-       for (type = 0; type < MAP__NR_TYPES; ++type) {
-               struct kmap *kmap;
-               struct map *map;
-
-               machine->vmlinux_maps[type] = map__new2(0, kernel, type);
-               if (machine->vmlinux_maps[type] == NULL)
-                       return -1;
+       machine->vmlinux_map = map__new2(0, kernel);
+       if (machine->vmlinux_map == NULL)
+               return -1;
 
-               machine->vmlinux_maps[type]->map_ip =
-                       machine->vmlinux_maps[type]->unmap_ip =
-                               identity__map_ip;
-               map = __machine__kernel_map(machine, type);
-               kmap = map__kmap(map);
-               if (!kmap)
-                       return -1;
+       machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
+       map = machine__kernel_map(machine);
+       kmap = map__kmap(map);
+       if (!kmap)
+               return -1;
 
-               kmap->kmaps = &machine->kmaps;
-               map_groups__insert(&machine->kmaps, map);
-       }
+       kmap->kmaps = &machine->kmaps;
+       map_groups__insert(&machine->kmaps, map);
 
        return 0;
 }
 
 void machine__destroy_kernel_maps(struct machine *machine)
 {
-       int type;
-
-       for (type = 0; type < MAP__NR_TYPES; ++type) {
-               struct kmap *kmap;
-               struct map *map = __machine__kernel_map(machine, type);
-
-               if (map == NULL)
-                       continue;
+       struct kmap *kmap;
+       struct map *map = machine__kernel_map(machine);
 
-               kmap = map__kmap(map);
-               map_groups__remove(&machine->kmaps, map);
-               if (kmap && kmap->ref_reloc_sym) {
-                       /*
-                        * ref_reloc_sym is shared among all maps, so free just
-                        * on one of them.
-                        */
-                       if (type == MAP__FUNCTION) {
-                               zfree((char **)&kmap->ref_reloc_sym->name);
-                               zfree(&kmap->ref_reloc_sym);
-                       } else
-                               kmap->ref_reloc_sym = NULL;
-               }
+       if (map == NULL)
+               return;
 
-               map__put(machine->vmlinux_maps[type]);
-               machine->vmlinux_maps[type] = NULL;
+       kmap = map__kmap(map);
+       map_groups__remove(&machine->kmaps, map);
+       if (kmap && kmap->ref_reloc_sym) {
+               zfree((char **)&kmap->ref_reloc_sym->name);
+               zfree(&kmap->ref_reloc_sym);
        }
+
+       map__zput(machine->vmlinux_map);
 }
 
 int machines__create_guest_kernel_maps(struct machines *machines)
@@ -989,32 +1092,31 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid)
        return machine__create_kernel_maps(machine);
 }
 
-int machine__load_kallsyms(struct machine *machine, const char *filename,
-                            enum map_type type)
+int machine__load_kallsyms(struct machine *machine, const char *filename)
 {
        struct map *map = machine__kernel_map(machine);
        int ret = __dso__load_kallsyms(map->dso, filename, map, true);
 
        if (ret > 0) {
-               dso__set_loaded(map->dso, type);
+               dso__set_loaded(map->dso);
                /*
                 * Since /proc/kallsyms will have multiple sessions for the
                 * kernel, with modules between them, fixup the end of all
                 * sections.
                 */
-               __map_groups__fixup_end(&machine->kmaps, type);
+               map_groups__fixup_end(&machine->kmaps);
        }
 
        return ret;
 }
 
-int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
+int machine__load_vmlinux_path(struct machine *machine)
 {
        struct map *map = machine__kernel_map(machine);
        int ret = dso__load_vmlinux_path(map->dso, map);
 
        if (ret > 0)
-               dso__set_loaded(map->dso, type);
+               dso__set_loaded(map->dso);
 
        return ret;
 }
@@ -1055,10 +1157,9 @@ static bool is_kmod_dso(struct dso *dso)
 static int map_groups__set_module_path(struct map_groups *mg, const char *path,
                                       struct kmod_path *m)
 {
-       struct map *map;
        char *long_name;
+       struct map *map = map_groups__find_by_name(mg, m->name);
 
-       map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
        if (map == NULL)
                return 0;
 
@@ -1207,19 +1308,14 @@ static int machine__create_modules(struct machine *machine)
 static void machine__set_kernel_mmap(struct machine *machine,
                                     u64 start, u64 end)
 {
-       int i;
-
-       for (i = 0; i < MAP__NR_TYPES; i++) {
-               machine->vmlinux_maps[i]->start = start;
-               machine->vmlinux_maps[i]->end   = end;
-
-               /*
-                * Be a bit paranoid here, some perf.data file came with
-                * a zero sized synthesized MMAP event for the kernel.
-                */
-               if (start == 0 && end == 0)
-                       machine->vmlinux_maps[i]->end = ~0ULL;
-       }
+       machine->vmlinux_map->start = start;
+       machine->vmlinux_map->end   = end;
+       /*
+        * Be a bit paranoid here, some perf.data file came with
+        * a zero sized synthesized MMAP event for the kernel.
+        */
+       if (start == 0 && end == 0)
+               machine->vmlinux_map->end = ~0ULL;
 }
 
 int machine__create_kernel_maps(struct machine *machine)
@@ -1234,9 +1330,8 @@ int machine__create_kernel_maps(struct machine *machine)
                return -1;
 
        ret = __machine__create_kernel_maps(machine, kernel);
-       dso__put(kernel);
        if (ret < 0)
-               return -1;
+               goto out_put;
 
        if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
                if (machine__is_host(machine))
@@ -1249,9 +1344,10 @@ int machine__create_kernel_maps(struct machine *machine)
 
        if (!machine__get_running_kernel_start(machine, &name, &addr)) {
                if (name &&
-                   maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
+                   map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, addr)) {
                        machine__destroy_kernel_maps(machine);
-                       return -1;
+                       ret = -1;
+                       goto out_put;
                }
 
                /* we have a real start address now, so re-order the kmaps */
@@ -1267,12 +1363,16 @@ int machine__create_kernel_maps(struct machine *machine)
                map__put(map);
        }
 
+       if (machine__create_extra_kernel_maps(machine, kernel))
+               pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
+
        /* update end address of the kernel map using adjacent module address */
        map = map__next(machine__kernel_map(machine));
        if (map)
                machine__set_kernel_mmap(machine, addr, map->start);
-
-       return 0;
+out_put:
+       dso__put(kernel);
+       return ret;
 }
 
 static bool machine__uses_kcore(struct machine *machine)
@@ -1287,6 +1387,32 @@ static bool machine__uses_kcore(struct machine *machine)
        return false;
 }
 
+static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
+                                            union perf_event *event)
+{
+       return machine__is(machine, "x86_64") &&
+              is_entry_trampoline(event->mmap.filename);
+}
+
+static int machine__process_extra_kernel_map(struct machine *machine,
+                                            union perf_event *event)
+{
+       struct map *kernel_map = machine__kernel_map(machine);
+       struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
+       struct extra_kernel_map xm = {
+               .start = event->mmap.start,
+               .end   = event->mmap.start + event->mmap.len,
+               .pgoff = event->mmap.pgoff,
+       };
+
+       if (kernel == NULL)
+               return -1;
+
+       strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
+
+       return machine__create_extra_kernel_map(machine, kernel, &xm);
+}
+
 static int machine__process_kernel_mmap_event(struct machine *machine,
                                              union perf_event *event)
 {
@@ -1379,9 +1505,9 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                 * time /proc/sys/kernel/kptr_restrict was non zero.
                 */
                if (event->mmap.pgoff != 0) {
-                       maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
-                                                        symbol_name,
-                                                        event->mmap.pgoff);
+                       map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
+                                                       symbol_name,
+                                                       event->mmap.pgoff);
                }
 
                if (machine__is_default_guest(machine)) {
@@ -1390,6 +1516,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                         */
                        dso__load(kernel, machine__kernel_map(machine));
                }
+       } else if (perf_event__is_extra_kernel_mmap(machine, event)) {
+               return machine__process_extra_kernel_map(machine, event);
        }
        return 0;
 out_problem:
@@ -1402,7 +1530,6 @@ int machine__process_mmap2_event(struct machine *machine,
 {
        struct thread *thread;
        struct map *map;
-       enum map_type type;
        int ret = 0;
 
        if (dump_trace)
@@ -1421,11 +1548,6 @@ int machine__process_mmap2_event(struct machine *machine,
        if (thread == NULL)
                goto out_problem;
 
-       if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
-               type = MAP__VARIABLE;
-       else
-               type = MAP__FUNCTION;
-
        map = map__new(machine, event->mmap2.start,
                        event->mmap2.len, event->mmap2.pgoff,
                        event->mmap2.maj,
@@ -1433,7 +1555,7 @@ int machine__process_mmap2_event(struct machine *machine,
                        event->mmap2.ino_generation,
                        event->mmap2.prot,
                        event->mmap2.flags,
-                       event->mmap2.filename, type, thread);
+                       event->mmap2.filename, thread);
 
        if (map == NULL)
                goto out_problem_map;
@@ -1460,7 +1582,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
 {
        struct thread *thread;
        struct map *map;
-       enum map_type type;
+       u32 prot = 0;
        int ret = 0;
 
        if (dump_trace)
@@ -1479,16 +1601,14 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
        if (thread == NULL)
                goto out_problem;
 
-       if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
-               type = MAP__VARIABLE;
-       else
-               type = MAP__FUNCTION;
+       if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
+               prot = PROT_EXEC;
 
        map = map__new(machine, event->mmap.start,
                        event->mmap.len, event->mmap.pgoff,
-                       0, 0, 0, 0, 0, 0,
+                       0, 0, 0, 0, prot, 0,
                        event->mmap.filename,
-                       type, thread);
+                       thread);
 
        if (map == NULL)
                goto out_problem_map;
@@ -1664,7 +1784,7 @@ static void ip__resolve_ams(struct thread *thread,
         * Thus, we have to try consecutively until we find a match
         * or else, the symbol is unknown
         */
-       thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);
+       thread__find_cpumode_addr_location(thread, ip, &al);
 
        ams->addr = ip;
        ams->al_addr = al.addr;
@@ -1681,15 +1801,7 @@ static void ip__resolve_data(struct thread *thread,
 
        memset(&al, 0, sizeof(al));
 
-       thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
-       if (al.map == NULL) {
-               /*
-                * some shared data regions have execute bit set which puts
-                * their mapping in the MAP__FUNCTION type array.
-                * Check there as a fallback option before dropping the sample.
-                */
-               thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
-       }
+       thread__find_symbol(thread, m, addr, &al);
 
        ams->addr = addr;
        ams->al_addr = al.addr;
@@ -1758,8 +1870,7 @@ static int add_callchain_ip(struct thread *thread,
        al.filtered = 0;
        al.sym = NULL;
        if (!cpumode) {
-               thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
-                                                  ip, &al);
+               thread__find_cpumode_addr_location(thread, ip, &al);
        } else {
                if (ip >= PERF_CONTEXT_MAX) {
                        switch (ip) {
@@ -1784,8 +1895,7 @@ static int add_callchain_ip(struct thread *thread,
                        }
                        return 0;
                }
-               thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
-                                          ip, &al);
+               thread__find_symbol(thread, *cpumode, ip, &al);
        }
 
        if (al.sym != NULL) {
@@ -1810,7 +1920,7 @@ static int add_callchain_ip(struct thread *thread,
        }
 
        srcline = callchain_srcline(al.map, al.sym, al.addr);
-       return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
+       return callchain_cursor_append(cursor, ip, al.map, al.sym,
                                       branch, flags, nr_loop_iter,
                                       iter_cycles, branch_from, srcline);
 }
@@ -2342,6 +2452,20 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
        return 0;
 }
 
+/*
+ * Compares the raw arch string. N.B. see instead perf_env__arch() if a
+ * normalized arch is needed.
+ */
+bool machine__is(struct machine *machine, const char *arch)
+{
+       return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
+}
+
+int machine__nr_cpus_avail(struct machine *machine)
+{
+       return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
+}
+
 int machine__get_kernel_start(struct machine *machine)
 {
        struct map *map = machine__kernel_map(machine);
@@ -2358,7 +2482,12 @@ int machine__get_kernel_start(struct machine *machine)
        machine->kernel_start = 1ULL << 63;
        if (map) {
                err = map__load(map);
-               if (!err)
+               /*
+                * On x86_64, PTI entry trampolines are less than the
+                * start of kernel text, but still above 2^63. So leave
+                * kernel_start = 1ULL << 63 for x86_64.
+                */
+               if (!err && !machine__is(machine, "x86_64"))
                        machine->kernel_start = map->start;
        }
        return err;
@@ -2373,7 +2502,7 @@ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, ch
 {
        struct machine *machine = vmachine;
        struct map *map;
-       struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map);
+       struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
 
        if (sym == NULL)
                return NULL;
index 66cc200ef86f20282cba3732cd0635cd13f05931..1de7660d93e97430382c00318dab288dd025ec39 100644 (file)
@@ -49,13 +49,14 @@ struct machine {
        struct perf_env   *env;
        struct dsos       dsos;
        struct map_groups kmaps;
-       struct map        *vmlinux_maps[MAP__NR_TYPES];
+       struct map        *vmlinux_map;
        u64               kernel_start;
        pid_t             *current_tid;
        union { /* Tool specific area */
                void      *priv;
                u64       db_id;
        };
+       bool              trampolines_mapped;
 };
 
 static inline struct threads *machine__threads(struct machine *machine, pid_t tid)
@@ -64,16 +65,22 @@ static inline struct threads *machine__threads(struct machine *machine, pid_t ti
        return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE];
 }
 
+/*
+ * The main kernel (vmlinux) map
+ */
 static inline
-struct map *__machine__kernel_map(struct machine *machine, enum map_type type)
+struct map *machine__kernel_map(struct machine *machine)
 {
-       return machine->vmlinux_maps[type];
+       return machine->vmlinux_map;
 }
 
+/*
+ * kernel (the one returned by machine__kernel_map()) plus kernel modules maps
+ */
 static inline
-struct map *machine__kernel_map(struct machine *machine)
+struct maps *machine__kernel_maps(struct machine *machine)
 {
-       return __machine__kernel_map(machine, MAP__FUNCTION);
+       return &machine->kmaps.maps;
 }
 
 int machine__get_kernel_start(struct machine *machine);
@@ -182,6 +189,9 @@ static inline bool machine__is_host(struct machine *machine)
        return machine ? machine->pid == HOST_KERNEL_ID : false;
 }
 
+bool machine__is(struct machine *machine, const char *arch);
+int machine__nr_cpus_avail(struct machine *machine);
+
 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
 
@@ -190,44 +200,27 @@ struct dso *machine__findnew_dso(struct machine *machine, const char *filename);
 size_t machine__fprintf(struct machine *machine, FILE *fp);
 
 static inline
-struct symbol *machine__find_kernel_symbol(struct machine *machine,
-                                          enum map_type type, u64 addr,
+struct symbol *machine__find_kernel_symbol(struct machine *machine, u64 addr,
                                           struct map **mapp)
 {
-       return map_groups__find_symbol(&machine->kmaps, type, addr, mapp);
+       return map_groups__find_symbol(&machine->kmaps, addr, mapp);
 }
 
 static inline
 struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
-                                                  enum map_type type, const char *name,
+                                                  const char *name,
                                                   struct map **mapp)
 {
-       return map_groups__find_symbol_by_name(&machine->kmaps, type, name, mapp);
-}
-
-static inline
-struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr,
-                                            struct map **mapp)
-{
-       return machine__find_kernel_symbol(machine, MAP__FUNCTION, addr,
-                                          mapp);
-}
-
-static inline
-struct symbol *machine__find_kernel_function_by_name(struct machine *machine,
-                                                    const char *name,
-                                                    struct map **mapp)
-{
-       return map_groups__find_function_by_name(&machine->kmaps, name, mapp);
+       return map_groups__find_symbol_by_name(&machine->kmaps, name, mapp);
 }
 
 struct map *machine__findnew_module_map(struct machine *machine, u64 start,
                                        const char *filename);
 int arch__fix_module_text_start(u64 *start, const char *name);
 
-int machine__load_kallsyms(struct machine *machine, const char *filename,
-                          enum map_type type);
-int machine__load_vmlinux_path(struct machine *machine, enum map_type type);
+int machine__load_kallsyms(struct machine *machine, const char *filename);
+
+int machine__load_vmlinux_path(struct machine *machine);
 
 size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
                                     bool (skip)(struct dso *dso, int parm), int parm);
@@ -276,4 +269,25 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
  */
 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp);
 
+void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+                                   size_t bufsz);
+
+int machine__create_extra_kernel_maps(struct machine *machine,
+                                     struct dso *kernel);
+
+/* Kernel-space maps for symbols that are outside the main kernel map and module maps */
+struct extra_kernel_map {
+       u64 start;
+       u64 end;
+       u64 pgoff;
+       char name[KMAP_NAME_LEN];
+};
+
+int machine__create_extra_kernel_map(struct machine *machine,
+                                    struct dso *kernel,
+                                    struct extra_kernel_map *xm);
+
+int machine__map_x86_64_entry_trampolines(struct machine *machine,
+                                         struct dso *kernel);
+
 #endif /* __PERF_MACHINE_H */
index 8fe57031e1a85ba3816b322e30526bcf9f237d14..6ae97eda370bd2d8dda91f0048aa22bd1fa330ef 100644 (file)
 
 static void __maps__insert(struct maps *maps, struct map *map);
 
-const char *map_type__name[MAP__NR_TYPES] = {
-       [MAP__FUNCTION] = "Functions",
-       [MAP__VARIABLE] = "Variables",
-};
-
 static inline int is_anon_memory(const char *filename, u32 flags)
 {
        return flags & MAP_HUGETLB ||
@@ -129,10 +124,8 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
        return false;
 }
 
-void map__init(struct map *map, enum map_type type,
-              u64 start, u64 end, u64 pgoff, struct dso *dso)
+void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
 {
-       map->type     = type;
        map->start    = start;
        map->end      = end;
        map->pgoff    = pgoff;
@@ -149,7 +142,7 @@ void map__init(struct map *map, enum map_type type,
 struct map *map__new(struct machine *machine, u64 start, u64 len,
                     u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
                     u64 ino_gen, u32 prot, u32 flags, char *filename,
-                    enum map_type type, struct thread *thread)
+                    struct thread *thread)
 {
        struct map *map = malloc(sizeof(*map));
        struct nsinfo *nsi = NULL;
@@ -173,7 +166,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
                map->flags = flags;
                nsi = nsinfo__get(thread->nsinfo);
 
-               if ((anon || no_dso) && nsi && type == MAP__FUNCTION) {
+               if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
                        snprintf(newfilename, sizeof(newfilename),
                                 "/tmp/perf-%d.map", nsi->pid);
                        filename = newfilename;
@@ -203,7 +196,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
                if (dso == NULL)
                        goto out_delete;
 
-               map__init(map, type, start, start + len, pgoff, dso);
+               map__init(map, start, start + len, pgoff, dso);
 
                if (anon || no_dso) {
                        map->map_ip = map->unmap_ip = identity__map_ip;
@@ -213,8 +206,8 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
                         * functions still return NULL, and we avoid the
                         * unnecessary map__load warning.
                         */
-                       if (type != MAP__FUNCTION)
-                               dso__set_loaded(dso, map->type);
+                       if (!(prot & PROT_EXEC))
+                               dso__set_loaded(dso);
                }
                dso->nsinfo = nsi;
                dso__put(dso);
@@ -231,7 +224,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
  * they are loaded) and for vmlinux, where only after we load all the
  * symbols we'll know where it starts and ends.
  */
-struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
+struct map *map__new2(u64 start, struct dso *dso)
 {
        struct map *map = calloc(1, (sizeof(*map) +
                                     (dso->kernel ? sizeof(struct kmap) : 0)));
@@ -239,7 +232,7 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
                /*
                 * ->end will be filled after we load all the symbols
                 */
-               map__init(map, type, start, 0, 0, dso);
+               map__init(map, start, 0, 0, dso);
        }
 
        return map;
@@ -256,7 +249,19 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
  */
 bool __map__is_kernel(const struct map *map)
 {
-       return __machine__kernel_map(map->groups->machine, map->type) == map;
+       return machine__kernel_map(map->groups->machine) == map;
+}
+
+bool __map__is_extra_kernel_map(const struct map *map)
+{
+       struct kmap *kmap = __map__kmap((struct map *)map);
+
+       return kmap && kmap->name[0];
+}
+
+bool map__has_symbols(const struct map *map)
+{
+       return dso__has_symbols(map->dso);
 }
 
 static void map__exit(struct map *map)
@@ -279,7 +284,7 @@ void map__put(struct map *map)
 
 void map__fixup_start(struct map *map)
 {
-       struct rb_root *symbols = &map->dso->symbols[map->type];
+       struct rb_root *symbols = &map->dso->symbols;
        struct rb_node *nd = rb_first(symbols);
        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
@@ -289,7 +294,7 @@ void map__fixup_start(struct map *map)
 
 void map__fixup_end(struct map *map)
 {
-       struct rb_root *symbols = &map->dso->symbols[map->type];
+       struct rb_root *symbols = &map->dso->symbols;
        struct rb_node *nd = rb_last(symbols);
        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
@@ -304,7 +309,7 @@ int map__load(struct map *map)
        const char *name = map->dso->long_name;
        int nr;
 
-       if (dso__loaded(map->dso, map->type))
+       if (dso__loaded(map->dso))
                return 0;
 
        nr = dso__load(map->dso, map);
@@ -348,7 +353,7 @@ struct symbol *map__find_symbol(struct map *map, u64 addr)
        if (map__load(map) < 0)
                return NULL;
 
-       return dso__find_symbol(map->dso, map->type, addr);
+       return dso__find_symbol(map->dso, addr);
 }
 
 struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
@@ -356,10 +361,10 @@ struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
        if (map__load(map) < 0)
                return NULL;
 
-       if (!dso__sorted_by_name(map->dso, map->type))
-               dso__sort_by_name(map->dso, map->type);
+       if (!dso__sorted_by_name(map->dso))
+               dso__sort_by_name(map->dso);
 
-       return dso__find_symbol_by_name(map->dso, map->type, name);
+       return dso__find_symbol_by_name(map->dso, name);
 }
 
 struct map *map__clone(struct map *from)
@@ -494,10 +499,7 @@ static void maps__init(struct maps *maps)
 
 void map_groups__init(struct map_groups *mg, struct machine *machine)
 {
-       int i;
-       for (i = 0; i < MAP__NR_TYPES; ++i) {
-               maps__init(&mg->maps[i]);
-       }
+       maps__init(&mg->maps);
        mg->machine = machine;
        refcount_set(&mg->refcnt, 1);
 }
@@ -525,22 +527,12 @@ static void maps__exit(struct maps *maps)
 
 void map_groups__exit(struct map_groups *mg)
 {
-       int i;
-
-       for (i = 0; i < MAP__NR_TYPES; ++i)
-               maps__exit(&mg->maps[i]);
+       maps__exit(&mg->maps);
 }
 
 bool map_groups__empty(struct map_groups *mg)
 {
-       int i;
-
-       for (i = 0; i < MAP__NR_TYPES; ++i) {
-               if (maps__first(&mg->maps[i]))
-                       return false;
-       }
-
-       return true;
+       return !maps__first(&mg->maps);
 }
 
 struct map_groups *map_groups__new(struct machine *machine)
@@ -566,10 +558,9 @@ void map_groups__put(struct map_groups *mg)
 }
 
 struct symbol *map_groups__find_symbol(struct map_groups *mg,
-                                      enum map_type type, u64 addr,
-                                      struct map **mapp)
+                                      u64 addr, struct map **mapp)
 {
-       struct map *map = map_groups__find(mg, type, addr);
+       struct map *map = map_groups__find(mg, addr);
 
        /* Ensure map is loaded before using map->map_ip */
        if (map != NULL && map__load(map) >= 0) {
@@ -608,13 +599,10 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
 }
 
 struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
-                                              enum map_type type,
                                               const char *name,
                                               struct map **mapp)
 {
-       struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp);
-
-       return sym;
+       return maps__find_symbol_by_name(&mg->maps, name, mapp);
 }
 
 int map_groups__find_ams(struct addr_map_symbol *ams)
@@ -622,8 +610,7 @@ int map_groups__find_ams(struct addr_map_symbol *ams)
        if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
                if (ams->map->groups == NULL)
                        return -1;
-               ams->map = map_groups__find(ams->map->groups, ams->map->type,
-                                           ams->addr);
+               ams->map = map_groups__find(ams->map->groups, ams->addr);
                if (ams->map == NULL)
                        return -1;
        }
@@ -646,7 +633,7 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp)
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 2) {
-                       printed += dso__fprintf(pos->dso, pos->type, fp);
+                       printed += dso__fprintf(pos->dso, fp);
                        printed += fprintf(fp, "--\n");
                }
        }
@@ -656,24 +643,14 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp)
        return printed;
 }
 
-size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
-                                 FILE *fp)
-{
-       size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
-       return printed += maps__fprintf(&mg->maps[type], fp);
-}
-
 size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
 {
-       size_t printed = 0, i;
-       for (i = 0; i < MAP__NR_TYPES; ++i)
-               printed += __map_groups__fprintf_maps(mg, i, fp);
-       return printed;
+       return maps__fprintf(&mg->maps, fp);
 }
 
 static void __map_groups__insert(struct map_groups *mg, struct map *map)
 {
-       __maps__insert(&mg->maps[map->type], map);
+       __maps__insert(&mg->maps, map);
        map->groups = mg;
 }
 
@@ -758,19 +735,18 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
                                   FILE *fp)
 {
-       return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
+       return maps__fixup_overlappings(&mg->maps, map, fp);
 }
 
 /*
  * XXX This should not really _copy_ te maps, but refcount them.
  */
-int map_groups__clone(struct thread *thread,
-                     struct map_groups *parent, enum map_type type)
+int map_groups__clone(struct thread *thread, struct map_groups *parent)
 {
        struct map_groups *mg = thread->mg;
        int err = -ENOMEM;
        struct map *map;
-       struct maps *maps = &parent->maps[type];
+       struct maps *maps = &parent->maps;
 
        down_read(&maps->lock);
 
@@ -877,15 +853,22 @@ struct map *map__next(struct map *map)
        return NULL;
 }
 
-struct kmap *map__kmap(struct map *map)
+struct kmap *__map__kmap(struct map *map)
 {
-       if (!map->dso || !map->dso->kernel) {
-               pr_err("Internal error: map__kmap with a non-kernel map\n");
+       if (!map->dso || !map->dso->kernel)
                return NULL;
-       }
        return (struct kmap *)(map + 1);
 }
 
+struct kmap *map__kmap(struct map *map)
+{
+       struct kmap *kmap = __map__kmap(map);
+
+       if (!kmap)
+               pr_err("Internal error: map__kmap with a non-kernel map\n");
+       return kmap;
+}
+
 struct map_groups *map__kmaps(struct map *map)
 {
        struct kmap *kmap = map__kmap(map);
index 0e9bbe01b0abc934daec873254e06c3da36f4d8f..97e2a063bd654a7d2ea0493bd347d6f1438c7e7c 100644 (file)
@@ -8,19 +8,11 @@
 #include <linux/rbtree.h>
 #include <pthread.h>
 #include <stdio.h>
+#include <string.h>
 #include <stdbool.h>
 #include <linux/types.h>
 #include "rwsem.h"
 
-enum map_type {
-       MAP__FUNCTION = 0,
-       MAP__VARIABLE,
-};
-
-#define MAP__NR_TYPES (MAP__VARIABLE + 1)
-
-extern const char *map_type__name[MAP__NR_TYPES];
-
 struct dso;
 struct ip_callchain;
 struct ref_reloc_sym;
@@ -35,7 +27,6 @@ struct map {
        };
        u64                     start;
        u64                     end;
-       u8 /* enum map_type */  type;
        bool                    erange_warned;
        u32                     priv;
        u32                     prot;
@@ -56,9 +47,12 @@ struct map {
        refcount_t              refcnt;
 };
 
+#define KMAP_NAME_LEN 256
+
 struct kmap {
        struct ref_reloc_sym    *ref_reloc_sym;
        struct map_groups       *kmaps;
+       char                    name[KMAP_NAME_LEN];
 };
 
 struct maps {
@@ -67,7 +61,7 @@ struct maps {
 };
 
 struct map_groups {
-       struct maps      maps[MAP__NR_TYPES];
+       struct maps      maps;
        struct machine   *machine;
        refcount_t       refcnt;
 };
@@ -85,6 +79,7 @@ static inline struct map_groups *map_groups__get(struct map_groups *mg)
 
 void map_groups__put(struct map_groups *mg);
 
+struct kmap *__map__kmap(struct map *map);
 struct kmap *map__kmap(struct map *map);
 struct map_groups *map__kmaps(struct map *map);
 
@@ -125,7 +120,7 @@ struct thread;
  * Note: caller must ensure map->dso is not NULL (map is loaded).
  */
 #define map__for_each_symbol(map, pos, n)      \
-       dso__for_each_symbol(map->dso, pos, n, map->type)
+       dso__for_each_symbol(map->dso, pos, n)
 
 /* map__for_each_symbol_with_name - iterate over the symbols in the given map
  *                                  that have the given name
@@ -144,13 +139,13 @@ struct thread;
 #define map__for_each_symbol_by_name(map, sym_name, pos)               \
        __map__for_each_symbol_by_name(map, sym_name, (pos))
 
-void map__init(struct map *map, enum map_type type,
+void map__init(struct map *map,
               u64 start, u64 end, u64 pgoff, struct dso *dso);
 struct map *map__new(struct machine *machine, u64 start, u64 len,
                     u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
                     u64 ino_gen, u32 prot, u32 flags,
-                    char *filename, enum map_type type, struct thread *thread);
-struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
+                    char *filename, struct thread *thread);
+struct map *map__new2(u64 start, struct dso *dso);
 void map__delete(struct map *map);
 struct map *map__clone(struct map *map);
 
@@ -185,8 +180,6 @@ void map__fixup_end(struct map *map);
 
 void map__reloc_vmlinux(struct map *map);
 
-size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
-                                 FILE *fp);
 void maps__insert(struct maps *maps, struct map *map);
 void maps__remove(struct maps *maps, struct map *map);
 struct map *maps__find(struct maps *maps, u64 addr);
@@ -197,34 +190,29 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
 void map_groups__init(struct map_groups *mg, struct machine *machine);
 void map_groups__exit(struct map_groups *mg);
 int map_groups__clone(struct thread *thread,
-                     struct map_groups *parent, enum map_type type);
+                     struct map_groups *parent);
 size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
 
-int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
-                                    u64 addr);
+int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
+                                   u64 addr);
 
 static inline void map_groups__insert(struct map_groups *mg, struct map *map)
 {
-       maps__insert(&mg->maps[map->type], map);
+       maps__insert(&mg->maps, map);
        map->groups = mg;
 }
 
 static inline void map_groups__remove(struct map_groups *mg, struct map *map)
 {
-       maps__remove(&mg->maps[map->type], map);
+       maps__remove(&mg->maps, map);
 }
 
-static inline struct map *map_groups__find(struct map_groups *mg,
-                                          enum map_type type, u64 addr)
+static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
 {
-       return maps__find(&mg->maps[type], addr);
+       return maps__find(&mg->maps, addr);
 }
 
-static inline struct map *map_groups__first(struct map_groups *mg,
-                                           enum map_type type)
-{
-       return maps__first(&mg->maps[type]);
-}
+struct map *map_groups__first(struct map_groups *mg);
 
 static inline struct map *map_groups__next(struct map *map)
 {
@@ -232,11 +220,9 @@ static inline struct map *map_groups__next(struct map *map)
 }
 
 struct symbol *map_groups__find_symbol(struct map_groups *mg,
-                                      enum map_type type, u64 addr,
-                                      struct map **mapp);
+                                      u64 addr, struct map **mapp);
 
 struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
-                                              enum map_type type,
                                               const char *name,
                                               struct map **mapp);
 
@@ -244,24 +230,26 @@ struct addr_map_symbol;
 
 int map_groups__find_ams(struct addr_map_symbol *ams);
 
-static inline
-struct symbol *map_groups__find_function_by_name(struct map_groups *mg,
-                                                const char *name, struct map **mapp)
-{
-       return map_groups__find_symbol_by_name(mg, MAP__FUNCTION, name, mapp);
-}
-
 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
                                   FILE *fp);
 
-struct map *map_groups__find_by_name(struct map_groups *mg,
-                                    enum map_type type, const char *name);
+struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
 
 bool __map__is_kernel(const struct map *map);
+bool __map__is_extra_kernel_map(const struct map *map);
 
 static inline bool __map__is_kmodule(const struct map *map)
 {
-       return !__map__is_kernel(map);
+       return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map);
+}
+
+bool map__has_symbols(const struct map *map);
+
+#define ENTRY_TRAMPOLINE_NAME "__entry_SYSCALL_64_trampoline"
+
+static inline bool is_entry_trampoline(const char *name)
+{
+       return !strcmp(name, ENTRY_TRAMPOLINE_NAME);
 }
 
 #endif /* __PERF_MAP_H */
index 2fc4ee8b86c11259d7d95ed343d9f8994cb4781f..15eec49e71a12ad89596f928e39874522c4a65df 100644 (file)
@@ -156,13 +156,12 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
                    (strcmp(sys_dirent->d_name, ".")) &&        \
                    (strcmp(sys_dirent->d_name, "..")))
 
-static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
+static int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
 {
        char evt_path[MAXPATHLEN];
        int fd;
 
-       snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
-                       sys_dir->d_name, evt_dir->d_name);
+       snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, evt_dir->d_name);
        fd = open(evt_path, O_RDONLY);
        if (fd < 0)
                return -EINVAL;
@@ -171,12 +170,12 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
        return 0;
 }
 
-#define for_each_event(sys_dirent, evt_dir, evt_dirent)                \
+#define for_each_event(dir_path, evt_dir, evt_dirent)          \
        while ((evt_dirent = readdir(evt_dir)) != NULL)         \
                if (evt_dirent->d_type == DT_DIR &&             \
                    (strcmp(evt_dirent->d_name, ".")) &&        \
                    (strcmp(evt_dirent->d_name, "..")) &&       \
-                   (!tp_event_has_id(sys_dirent, evt_dirent)))
+                   (!tp_event_has_id(dir_path, evt_dirent)))
 
 #define MAX_EVENT_LENGTH 512
 
@@ -190,21 +189,21 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
        int fd;
        u64 id;
        char evt_path[MAXPATHLEN];
-       char dir_path[MAXPATHLEN];
+       char *dir_path;
 
-       sys_dir = opendir(tracing_events_path);
+       sys_dir = tracing_events__opendir();
        if (!sys_dir)
                return NULL;
 
        for_each_subsystem(sys_dir, sys_dirent) {
-
-               snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-                        sys_dirent->d_name);
+               dir_path = get_events_file(sys_dirent->d_name);
+               if (!dir_path)
+                       continue;
                evt_dir = opendir(dir_path);
                if (!evt_dir)
-                       continue;
+                       goto next;
 
-               for_each_event(sys_dirent, evt_dir, evt_dirent) {
+               for_each_event(dir_path, evt_dir, evt_dirent) {
 
                        scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
                                  evt_dirent->d_name);
@@ -218,6 +217,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
                        close(fd);
                        id = atoll(id_buf);
                        if (id == config) {
+                               put_events_file(dir_path);
                                closedir(evt_dir);
                                closedir(sys_dir);
                                path = zalloc(sizeof(*path));
@@ -242,6 +242,8 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
                        }
                }
                closedir(evt_dir);
+next:
+               put_events_file(dir_path);
        }
 
        closedir(sys_dir);
@@ -512,14 +514,19 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
                                      struct parse_events_error *err,
                                      struct list_head *head_config)
 {
-       char evt_path[MAXPATHLEN];
+       char *evt_path;
        struct dirent *evt_ent;
        DIR *evt_dir;
        int ret = 0, found = 0;
 
-       snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
+       evt_path = get_events_file(sys_name);
+       if (!evt_path) {
+               tracepoint_error(err, errno, sys_name, evt_name);
+               return -1;
+       }
        evt_dir = opendir(evt_path);
        if (!evt_dir) {
+               put_events_file(evt_path);
                tracepoint_error(err, errno, sys_name, evt_name);
                return -1;
        }
@@ -545,6 +552,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
                ret = -1;
        }
 
+       put_events_file(evt_path);
        closedir(evt_dir);
        return ret;
 }
@@ -570,7 +578,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
        DIR *events_dir;
        int ret = 0;
 
-       events_dir = opendir(tracing_events_path);
+       events_dir = tracing_events__opendir();
        if (!events_dir) {
                tracepoint_error(err, errno, sys_name, evt_name);
                return -1;
@@ -2092,13 +2100,13 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
        DIR *sys_dir, *evt_dir;
        struct dirent *sys_dirent, *evt_dirent;
        char evt_path[MAXPATHLEN];
-       char dir_path[MAXPATHLEN];
+       char *dir_path;
        char **evt_list = NULL;
        unsigned int evt_i = 0, evt_num = 0;
        bool evt_num_known = false;
 
 restart:
-       sys_dir = opendir(tracing_events_path);
+       sys_dir = tracing_events__opendir();
        if (!sys_dir)
                return;
 
@@ -2113,13 +2121,14 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
                    !strglobmatch(sys_dirent->d_name, subsys_glob))
                        continue;
 
-               snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-                        sys_dirent->d_name);
+               dir_path = get_events_file(sys_dirent->d_name);
+               if (!dir_path)
+                       continue;
                evt_dir = opendir(dir_path);
                if (!evt_dir)
-                       continue;
+                       goto next;
 
-               for_each_event(sys_dirent, evt_dir, evt_dirent) {
+               for_each_event(dir_path, evt_dir, evt_dirent) {
                        if (event_glob != NULL &&
                            !strglobmatch(evt_dirent->d_name, event_glob))
                                continue;
@@ -2133,11 +2142,15 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
                                 sys_dirent->d_name, evt_dirent->d_name);
 
                        evt_list[evt_i] = strdup(evt_path);
-                       if (evt_list[evt_i] == NULL)
+                       if (evt_list[evt_i] == NULL) {
+                               put_events_file(dir_path);
                                goto out_close_evt_dir;
+                       }
                        evt_i++;
                }
                closedir(evt_dir);
+next:
+               put_events_file(dir_path);
        }
        closedir(sys_dir);
 
@@ -2185,21 +2198,21 @@ int is_valid_tracepoint(const char *event_string)
        DIR *sys_dir, *evt_dir;
        struct dirent *sys_dirent, *evt_dirent;
        char evt_path[MAXPATHLEN];
-       char dir_path[MAXPATHLEN];
+       char *dir_path;
 
-       sys_dir = opendir(tracing_events_path);
+       sys_dir = tracing_events__opendir();
        if (!sys_dir)
                return 0;
 
        for_each_subsystem(sys_dir, sys_dirent) {
-
-               snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-                        sys_dirent->d_name);
+               dir_path = get_events_file(sys_dirent->d_name);
+               if (!dir_path)
+                       continue;
                evt_dir = opendir(dir_path);
                if (!evt_dir)
-                       continue;
+                       goto next;
 
-               for_each_event(sys_dirent, evt_dir, evt_dirent) {
+               for_each_event(dir_path, evt_dir, evt_dirent) {
                        snprintf(evt_path, MAXPATHLEN, "%s:%s",
                                 sys_dirent->d_name, evt_dirent->d_name);
                        if (!strcmp(evt_path, event_string)) {
@@ -2209,6 +2222,8 @@ int is_valid_tracepoint(const char *event_string)
                        }
                }
                closedir(evt_dir);
+next:
+               put_events_file(dir_path);
        }
        closedir(sys_dir);
        return 0;
index e1dbc9821617025c04086bae0cfdeaf3f3a7f025..3094f11e7d81573f6f051b75ea212dc36ffc9893 100644 (file)
@@ -111,17 +111,6 @@ void exit_probe_symbol_maps(void)
        symbol__exit();
 }
 
-static struct symbol *__find_kernel_function_by_name(const char *name,
-                                                    struct map **mapp)
-{
-       return machine__find_kernel_function_by_name(host_machine, name, mapp);
-}
-
-static struct symbol *__find_kernel_function(u64 addr, struct map **mapp)
-{
-       return machine__find_kernel_function(host_machine, addr, mapp);
-}
-
 static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
 {
        /* kmap->ref_reloc_sym should be set if host_machine is initialized */
@@ -149,7 +138,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
        if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
                *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
        else {
-               sym = __find_kernel_function_by_name(name, &map);
+               sym = machine__find_kernel_symbol_by_name(host_machine, name, &map);
                if (!sym)
                        return -ENOENT;
                *addr = map->unmap_ip(map, sym->start) -
@@ -161,8 +150,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
 
 static struct map *kernel_get_module_map(const char *module)
 {
-       struct map_groups *grp = &host_machine->kmaps;
-       struct maps *maps = &grp->maps[MAP__FUNCTION];
+       struct maps *maps = machine__kernel_maps(host_machine);
        struct map *pos;
 
        /* A file path -- this is an offline module */
@@ -341,7 +329,7 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso)
                char module_name[128];
 
                snprintf(module_name, sizeof(module_name), "[%s]", module);
-               map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name);
+               map = map_groups__find_by_name(&host_machine->kmaps, module_name);
                if (map) {
                        dso = map->dso;
                        goto found;
@@ -2098,7 +2086,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
                }
                if (addr) {
                        addr += tp->offset;
-                       sym = __find_kernel_function(addr, &map);
+                       sym = machine__find_kernel_symbol(host_machine, addr, &map);
                }
        }
 
@@ -3504,19 +3492,18 @@ int show_available_funcs(const char *target, struct nsinfo *nsi,
                               (target) ? : "kernel");
                goto end;
        }
-       if (!dso__sorted_by_name(map->dso, map->type))
-               dso__sort_by_name(map->dso, map->type);
+       if (!dso__sorted_by_name(map->dso))
+               dso__sort_by_name(map->dso);
 
        /* Show all (filtered) symbols */
        setup_pager();
 
-        for (nd = rb_first(&map->dso->symbol_names[map->type]); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&map->dso->symbol_names); nd; nd = rb_next(nd)) {
                struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
 
                if (strfilter__compare(_filter, pos->sym.name))
                        printf("%s\n", pos->sym.name);
-        }
-
+       }
 end:
        map__put(map);
        exit_probe_symbol_maps();
index 4ae1123c67949a01cadc1f4080728b038e15b88b..b76088fadf3d02476fec0270870d8f4cfb9a84ca 100644 (file)
@@ -84,8 +84,7 @@ int open_trace_file(const char *trace_file, bool readwrite)
        char buf[PATH_MAX];
        int ret;
 
-       ret = e_snprintf(buf, PATH_MAX, "%s/%s",
-                        tracing_path, trace_file);
+       ret = e_snprintf(buf, PATH_MAX, "%s/%s", tracing_path_mount(), trace_file);
        if (ret >= 0) {
                pr_debug("Opening %s write=%d\n", buf, readwrite);
                if (readwrite && !probe_event_dry_run)
index f4a7a437ee87a2c3d7e7501991e237be3ed6d852..b998bb475589ba7760455da7a6f2106db2f91e86 100644 (file)
@@ -1973,12 +1973,11 @@ bool perf_session__has_traces(struct perf_session *session, const char *msg)
        return false;
 }
 
-int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
-                                    const char *symbol_name, u64 addr)
+int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
 {
        char *bracket;
-       int i;
        struct ref_reloc_sym *ref;
+       struct kmap *kmap;
 
        ref = zalloc(sizeof(struct ref_reloc_sym));
        if (ref == NULL)
@@ -1996,13 +1995,9 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
 
        ref->addr = addr;
 
-       for (i = 0; i < MAP__NR_TYPES; ++i) {
-               struct kmap *kmap = map__kmap(maps[i]);
-
-               if (!kmap)
-                       continue;
+       kmap = map__kmap(map);
+       if (kmap)
                kmap->ref_reloc_sym = ref;
-       }
 
        return 0;
 }
index 26a68dfd8a4f54040bf0a2afc1bc873203a25bba..4058ade352a53c06680affea09d96b56abedd81b 100644 (file)
@@ -2,7 +2,7 @@
 #include <errno.h>
 #include <inttypes.h>
 #include <regex.h>
-#include <sys/mman.h>
+#include <linux/mman.h>
 #include "sort.h"
 #include "hist.h"
 #include "comm.h"
@@ -282,7 +282,7 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
 
        ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
        if (sym && map) {
-               if (map->type == MAP__VARIABLE) {
+               if (sym->type == STT_OBJECT) {
                        ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
                        ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
                                        ip - map->unmap_ip(map, sym->start));
@@ -1211,7 +1211,7 @@ static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
 
                /* print [s] for shared data mmaps */
                if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
-                    map && (map->type == MAP__VARIABLE) &&
+                    map && !(map->prot & PROT_EXEC) &&
                    (map->flags & MAP_SHARED) &&
                    (map->maj || map->min || map->ino ||
                     map->ino_generation))
@@ -2582,7 +2582,7 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
                if (sort__mode != SORT_MODE__MEMORY)
                        return -EINVAL;
 
-               if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
+               if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
                        return -EINVAL;
 
                if (sd->entry == &sort_mem_daddr_sym)
@@ -2628,7 +2628,7 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
                if (*tok) {
                        ret = sort_dimension__add(list, tok, evlist, level);
                        if (ret == -EINVAL) {
-                               if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
+                               if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
                                        pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
                                else
                                        pr_err("Invalid --sort key: `%s'", tok);
index 035b62e2c60b930f27b74f99fe8658a2d6ade3fe..9e6896293bbdf8a2742df0a844bd1357c5067650 100644 (file)
@@ -186,13 +186,13 @@ static inline float hist_entry__get_percent_limit(struct hist_entry *he)
 static inline u64 cl_address(u64 address)
 {
        /* return the cacheline of the address */
-       return (address & ~(cacheline_size - 1));
+       return (address & ~(cacheline_size() - 1));
 }
 
 static inline u64 cl_offset(u64 address)
 {
        /* return the cacheline of the address */
-       return (address & (cacheline_size - 1));
+       return (address & (cacheline_size() - 1));
 }
 
 enum sort_mode {
index 3c21fd059b6496e61d4957ccfe1baa9e09a966e9..09d6746e6ec8e34383860356b080bb3acfb8e5a9 100644 (file)
@@ -103,6 +103,7 @@ static struct symbol *new_inline_sym(struct dso *dso,
                inline_sym = symbol__new(base_sym ? base_sym->start : 0,
                                         base_sym ? base_sym->end : 0,
                                         base_sym ? base_sym->binding : 0,
+                                        base_sym ? base_sym->type : 0,
                                         funcname);
                if (inline_sym)
                        inline_sym->inlined = 1;
index 8f56ba4fd258b4f8e2a68d81d6545c6aeb8a65dd..36efb986f7fc640f7d109cec664d6f0e208c1edf 100644 (file)
@@ -7,8 +7,7 @@
 #include "xyarray.h"
 #include "rblist.h"
 
-struct stats
-{
+struct stats {
        double n, mean, M2;
        u64 max, min;
 };
index 2de770511e705dbc8caa7a69f1398548f102720d..29770ea61768b018979cc33e3c34f8a767770316 100644 (file)
@@ -114,16 +114,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
                sym->st_shndx != SHN_ABS;
 }
 
-static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
+static bool elf_sym__filter(GElf_Sym *sym)
 {
-       switch (type) {
-       case MAP__FUNCTION:
-               return elf_sym__is_function(sym);
-       case MAP__VARIABLE:
-               return elf_sym__is_object(sym);
-       default:
-               return false;
-       }
+       return elf_sym__is_function(sym) || elf_sym__is_object(sym);
 }
 
 static inline const char *elf_sym__name(const GElf_Sym *sym,
@@ -150,17 +143,10 @@ static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
        return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
 }
 
-static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
-                         enum map_type type)
+static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs)
 {
-       switch (type) {
-       case MAP__FUNCTION:
-               return elf_sec__is_text(shdr, secstrs);
-       case MAP__VARIABLE:
-               return elf_sec__is_data(shdr, secstrs);
-       default:
-               return false;
-       }
+       return elf_sec__is_text(shdr, secstrs) ||
+              elf_sec__is_data(shdr, secstrs);
 }
 
 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
@@ -256,7 +242,7 @@ static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
  * And always look at the original dso, not at debuginfo packages, that
  * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
  */
-int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map)
+int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
 {
        uint32_t nr_rel_entries, idx;
        GElf_Sym sym;
@@ -364,12 +350,12 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *
                        free(demangled);
 
                        f = symbol__new(plt_offset, plt_entry_size,
-                                       STB_GLOBAL, sympltname);
+                                       STB_GLOBAL, STT_FUNC, sympltname);
                        if (!f)
                                goto out_elf_end;
 
                        plt_offset += plt_entry_size;
-                       symbols__insert(&dso->symbols[map->type], f);
+                       symbols__insert(&dso->symbols, f);
                        ++nr;
                }
        } else if (shdr_rel_plt.sh_type == SHT_REL) {
@@ -390,12 +376,12 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *
                        free(demangled);
 
                        f = symbol__new(plt_offset, plt_entry_size,
-                                       STB_GLOBAL, sympltname);
+                                       STB_GLOBAL, STT_FUNC, sympltname);
                        if (!f)
                                goto out_elf_end;
 
                        plt_offset += plt_entry_size;
-                       symbols__insert(&dso->symbols[map->type], f);
+                       symbols__insert(&dso->symbols, f);
                        ++nr;
                }
        }
@@ -811,6 +797,110 @@ static u64 ref_reloc(struct kmap *kmap)
 void __weak arch__sym_update(struct symbol *s __maybe_unused,
                GElf_Sym *sym __maybe_unused) { }
 
+static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
+                                     GElf_Sym *sym, GElf_Shdr *shdr,
+                                     struct map_groups *kmaps, struct kmap *kmap,
+                                     struct dso **curr_dsop, struct map **curr_mapp,
+                                     const char *section_name,
+                                     bool adjust_kernel_syms, bool kmodule, bool *remap_kernel)
+{
+       struct dso *curr_dso = *curr_dsop;
+       struct map *curr_map;
+       char dso_name[PATH_MAX];
+
+       /* Adjust symbol to map to file offset */
+       if (adjust_kernel_syms)
+               sym->st_value -= shdr->sh_addr - shdr->sh_offset;
+
+       if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0)
+               return 0;
+
+       if (strcmp(section_name, ".text") == 0) {
+               /*
+                * The initial kernel mapping is based on
+                * kallsyms and identity maps.  Overwrite it to
+                * map to the kernel dso.
+                */
+               if (*remap_kernel && dso->kernel) {
+                       *remap_kernel = false;
+                       map->start = shdr->sh_addr + ref_reloc(kmap);
+                       map->end = map->start + shdr->sh_size;
+                       map->pgoff = shdr->sh_offset;
+                       map->map_ip = map__map_ip;
+                       map->unmap_ip = map__unmap_ip;
+                       /* Ensure maps are correctly ordered */
+                       if (kmaps) {
+                               map__get(map);
+                               map_groups__remove(kmaps, map);
+                               map_groups__insert(kmaps, map);
+                               map__put(map);
+                       }
+               }
+
+               /*
+                * The initial module mapping is based on
+                * /proc/modules mapped to offset zero.
+                * Overwrite it to map to the module dso.
+                */
+               if (*remap_kernel && kmodule) {
+                       *remap_kernel = false;
+                       map->pgoff = shdr->sh_offset;
+               }
+
+               *curr_mapp = map;
+               *curr_dsop = dso;
+               return 0;
+       }
+
+       if (!kmap)
+               return 0;
+
+       snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name);
+
+       curr_map = map_groups__find_by_name(kmaps, dso_name);
+       if (curr_map == NULL) {
+               u64 start = sym->st_value;
+
+               if (kmodule)
+                       start += map->start + shdr->sh_offset;
+
+               curr_dso = dso__new(dso_name);
+               if (curr_dso == NULL)
+                       return -1;
+               curr_dso->kernel = dso->kernel;
+               curr_dso->long_name = dso->long_name;
+               curr_dso->long_name_len = dso->long_name_len;
+               curr_map = map__new2(start, curr_dso);
+               dso__put(curr_dso);
+               if (curr_map == NULL)
+                       return -1;
+
+               if (adjust_kernel_syms) {
+                       curr_map->start  = shdr->sh_addr + ref_reloc(kmap);
+                       curr_map->end    = curr_map->start + shdr->sh_size;
+                       curr_map->pgoff  = shdr->sh_offset;
+               } else {
+                       curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
+               }
+               curr_dso->symtab_type = dso->symtab_type;
+               map_groups__insert(kmaps, curr_map);
+               /*
+                * Add it before we drop the reference to curr_map, i.e. while
+                * we still are sure to have a reference to this DSO via
+                * *curr_map->dso.
+                */
+               dsos__add(&map->groups->machine->dsos, curr_dso);
+               /* kmaps already got it */
+               map__put(curr_map);
+               dso__set_loaded(curr_dso);
+               *curr_mapp = curr_map;
+               *curr_dsop = curr_dso;
+       } else
+               *curr_dsop = curr_map->dso;
+
+       return 0;
+}
+
 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
                  struct symsrc *runtime_ss, int kmodule)
 {
@@ -844,7 +934,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
         * have the wrong values for the dso maps, so remove them.
         */
        if (kmodule && syms_ss->symtab)
-               symbols__delete(&dso->symbols[map->type]);
+               symbols__delete(&dso->symbols);
 
        if (!syms_ss->symtab) {
                /*
@@ -921,10 +1011,10 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
 
        dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
        /*
-        * Initial kernel and module mappings do not map to the dso.  For
-        * function mappings, flag the fixups.
+        * Initial kernel and module mappings do not map to the dso.
+        * Flag the fixups.
         */
-       if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
+       if (dso->kernel || kmodule) {
                remap_kernel = true;
                adjust_kernel_syms = dso->adjust_symbols;
        }
@@ -936,7 +1026,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
                const char *section_name;
                bool used_opd = false;
 
-               if (!is_label && !elf_sym__is_a(&sym, map->type))
+               if (!is_label && !elf_sym__filter(&sym))
                        continue;
 
                /* Reject ARM ELF "mapping symbols": these aren't unique and
@@ -974,7 +1064,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
 
                gelf_getshdr(sec, &shdr);
 
-               if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
+               if (is_label && !elf_sec__filter(&shdr, secstrs))
                        continue;
 
                section_name = elf_sec__name(&shdr, secstrs);
@@ -982,134 +1072,37 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
                /* On ARM, symbols for thumb functions have 1 added to
                 * the symbol address as a flag - remove it */
                if ((ehdr.e_machine == EM_ARM) &&
-                   (map->type == MAP__FUNCTION) &&
+                   (GELF_ST_TYPE(sym.st_info) == STT_FUNC) &&
                    (sym.st_value & 1))
                        --sym.st_value;
 
                if (dso->kernel || kmodule) {
-                       char dso_name[PATH_MAX];
-
-                       /* Adjust symbol to map to file offset */
-                       if (adjust_kernel_syms)
-                               sym.st_value -= shdr.sh_addr - shdr.sh_offset;
-
-                       if (strcmp(section_name,
-                                  (curr_dso->short_name +
-                                   dso->short_name_len)) == 0)
-                               goto new_symbol;
-
-                       if (strcmp(section_name, ".text") == 0) {
-                               /*
-                                * The initial kernel mapping is based on
-                                * kallsyms and identity maps.  Overwrite it to
-                                * map to the kernel dso.
-                                */
-                               if (remap_kernel && dso->kernel) {
-                                       remap_kernel = false;
-                                       map->start = shdr.sh_addr +
-                                                    ref_reloc(kmap);
-                                       map->end = map->start + shdr.sh_size;
-                                       map->pgoff = shdr.sh_offset;
-                                       map->map_ip = map__map_ip;
-                                       map->unmap_ip = map__unmap_ip;
-                                       /* Ensure maps are correctly ordered */
-                                       if (kmaps) {
-                                               map__get(map);
-                                               map_groups__remove(kmaps, map);
-                                               map_groups__insert(kmaps, map);
-                                               map__put(map);
-                                       }
-                               }
-
-                               /*
-                                * The initial module mapping is based on
-                                * /proc/modules mapped to offset zero.
-                                * Overwrite it to map to the module dso.
-                                */
-                               if (remap_kernel && kmodule) {
-                                       remap_kernel = false;
-                                       map->pgoff = shdr.sh_offset;
-                               }
-
-                               curr_map = map;
-                               curr_dso = dso;
-                               goto new_symbol;
-                       }
-
-                       if (!kmap)
-                               goto new_symbol;
-
-                       snprintf(dso_name, sizeof(dso_name),
-                                "%s%s", dso->short_name, section_name);
-
-                       curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
-                       if (curr_map == NULL) {
-                               u64 start = sym.st_value;
-
-                               if (kmodule)
-                                       start += map->start + shdr.sh_offset;
-
-                               curr_dso = dso__new(dso_name);
-                               if (curr_dso == NULL)
-                                       goto out_elf_end;
-                               curr_dso->kernel = dso->kernel;
-                               curr_dso->long_name = dso->long_name;
-                               curr_dso->long_name_len = dso->long_name_len;
-                               curr_map = map__new2(start, curr_dso,
-                                                    map->type);
-                               dso__put(curr_dso);
-                               if (curr_map == NULL) {
-                                       goto out_elf_end;
-                               }
-                               if (adjust_kernel_syms) {
-                                       curr_map->start = shdr.sh_addr +
-                                                         ref_reloc(kmap);
-                                       curr_map->end = curr_map->start +
-                                                       shdr.sh_size;
-                                       curr_map->pgoff = shdr.sh_offset;
-                               } else {
-                                       curr_map->map_ip = identity__map_ip;
-                                       curr_map->unmap_ip = identity__map_ip;
-                               }
-                               curr_dso->symtab_type = dso->symtab_type;
-                               map_groups__insert(kmaps, curr_map);
-                               /*
-                                * Add it before we drop the referece to curr_map,
-                                * i.e. while we still are sure to have a reference
-                                * to this DSO via curr_map->dso.
-                                */
-                               dsos__add(&map->groups->machine->dsos, curr_dso);
-                               /* kmaps already got it */
-                               map__put(curr_map);
-                               dso__set_loaded(curr_dso, map->type);
-                       } else
-                               curr_dso = curr_map->dso;
-
-                       goto new_symbol;
-               }
-
-               if ((used_opd && runtime_ss->adjust_symbols)
-                               || (!used_opd && syms_ss->adjust_symbols)) {
+                       if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map,
+                                                      section_name, adjust_kernel_syms, kmodule, &remap_kernel))
+                               goto out_elf_end;
+               } else if ((used_opd && runtime_ss->adjust_symbols) ||
+                          (!used_opd && syms_ss->adjust_symbols)) {
                        pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
                                  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
                                  (u64)sym.st_value, (u64)shdr.sh_addr,
                                  (u64)shdr.sh_offset);
                        sym.st_value -= shdr.sh_addr - shdr.sh_offset;
                }
-new_symbol:
+
                demangled = demangle_sym(dso, kmodule, elf_name);
                if (demangled != NULL)
                        elf_name = demangled;
 
                f = symbol__new(sym.st_value, sym.st_size,
-                               GELF_ST_BIND(sym.st_info), elf_name);
+                               GELF_ST_BIND(sym.st_info),
+                               GELF_ST_TYPE(sym.st_info), elf_name);
                free(demangled);
                if (!f)
                        goto out_elf_end;
 
                arch__sym_update(f, &sym);
 
-               __symbols__insert(&curr_dso->symbols[curr_map->type], f, dso->kernel);
+               __symbols__insert(&curr_dso->symbols, f, dso->kernel);
                nr++;
        }
 
@@ -1117,14 +1110,14 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
         * For misannotated, zeroed, ASM function sizes.
         */
        if (nr > 0) {
-               symbols__fixup_end(&dso->symbols[map->type]);
-               symbols__fixup_duplicate(&dso->symbols[map->type]);
+               symbols__fixup_end(&dso->symbols);
+               symbols__fixup_duplicate(&dso->symbols);
                if (kmap) {
                        /*
                         * We need to fixup this here too because we create new
                         * maps here, for things like vsyscall sections.
                         */
-                       __map_groups__fixup_end(kmaps, map->type);
+                       map_groups__fixup_end(kmaps);
                }
        }
        err = nr;
@@ -1393,8 +1386,16 @@ static off_t kcore__write(struct kcore *kcore)
 
 struct phdr_data {
        off_t offset;
+       off_t rel;
        u64 addr;
        u64 len;
+       struct list_head node;
+       struct phdr_data *remaps;
+};
+
+struct sym_data {
+       u64 addr;
+       struct list_head node;
 };
 
 struct kcore_copy_info {
@@ -1404,16 +1405,78 @@ struct kcore_copy_info {
        u64 last_symbol;
        u64 first_module;
        u64 last_module_symbol;
-       struct phdr_data kernel_map;
-       struct phdr_data modules_map;
+       size_t phnum;
+       struct list_head phdrs;
+       struct list_head syms;
 };
 
+#define kcore_copy__for_each_phdr(k, p) \
+       list_for_each_entry((p), &(k)->phdrs, node)
+
+static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset)
+{
+       struct phdr_data *p = zalloc(sizeof(*p));
+
+       if (p) {
+               p->addr   = addr;
+               p->len    = len;
+               p->offset = offset;
+       }
+
+       return p;
+}
+
+static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci,
+                                                u64 addr, u64 len,
+                                                off_t offset)
+{
+       struct phdr_data *p = phdr_data__new(addr, len, offset);
+
+       if (p)
+               list_add_tail(&p->node, &kci->phdrs);
+
+       return p;
+}
+
+static void kcore_copy__free_phdrs(struct kcore_copy_info *kci)
+{
+       struct phdr_data *p, *tmp;
+
+       list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
+               list_del(&p->node);
+               free(p);
+       }
+}
+
+static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci,
+                                           u64 addr)
+{
+       struct sym_data *s = zalloc(sizeof(*s));
+
+       if (s) {
+               s->addr = addr;
+               list_add_tail(&s->node, &kci->syms);
+       }
+
+       return s;
+}
+
+static void kcore_copy__free_syms(struct kcore_copy_info *kci)
+{
+       struct sym_data *s, *tmp;
+
+       list_for_each_entry_safe(s, tmp, &kci->syms, node) {
+               list_del(&s->node);
+               free(s);
+       }
+}
+
 static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
                                        u64 start)
 {
        struct kcore_copy_info *kci = arg;
 
-       if (!symbol_type__is_a(type, MAP__FUNCTION))
+       if (!kallsyms__is_function(type))
                return 0;
 
        if (strchr(name, '[')) {
@@ -1438,6 +1501,9 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
                return 0;
        }
 
+       if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start))
+               return -1;
+
        return 0;
 }
 
@@ -1487,27 +1553,39 @@ static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
        return 0;
 }
 
-static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
-                           u64 s, u64 e)
+static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end,
+                          u64 pgoff, u64 s, u64 e)
 {
-       if (p->addr || s < start || s >= end)
-               return;
+       u64 len, offset;
+
+       if (s < start || s >= end)
+               return 0;
 
-       p->addr = s;
-       p->offset = (s - start) + pgoff;
-       p->len = e < end ? e - s : end - s;
+       offset = (s - start) + pgoff;
+       len = e < end ? e - s : end - s;
+
+       return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1;
 }
 
 static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
 {
        struct kcore_copy_info *kci = data;
        u64 end = start + len;
+       struct sym_data *sdat;
 
-       kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
-                       kci->etext);
+       if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext))
+               return -1;
 
-       kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
-                       kci->last_module_symbol);
+       if (kcore_copy__map(kci, start, end, pgoff, kci->first_module,
+                           kci->last_module_symbol))
+               return -1;
+
+       list_for_each_entry(sdat, &kci->syms, node) {
+               u64 s = round_down(sdat->addr, page_size);
+
+               if (kcore_copy__map(kci, start, end, pgoff, s, s + len))
+                       return -1;
+       }
 
        return 0;
 }
@@ -1520,6 +1598,64 @@ static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
        return 0;
 }
 
+static void kcore_copy__find_remaps(struct kcore_copy_info *kci)
+{
+       struct phdr_data *p, *k = NULL;
+       u64 kend;
+
+       if (!kci->stext)
+               return;
+
+       /* Find phdr that corresponds to the kernel map (contains stext) */
+       kcore_copy__for_each_phdr(kci, p) {
+               u64 pend = p->addr + p->len - 1;
+
+               if (p->addr <= kci->stext && pend >= kci->stext) {
+                       k = p;
+                       break;
+               }
+       }
+
+       if (!k)
+               return;
+
+       kend = k->offset + k->len;
+
+       /* Find phdrs that remap the kernel */
+       kcore_copy__for_each_phdr(kci, p) {
+               u64 pend = p->offset + p->len;
+
+               if (p == k)
+                       continue;
+
+               if (p->offset >= k->offset && pend <= kend)
+                       p->remaps = k;
+       }
+}
+
+static void kcore_copy__layout(struct kcore_copy_info *kci)
+{
+       struct phdr_data *p;
+       off_t rel = 0;
+
+       kcore_copy__find_remaps(kci);
+
+       kcore_copy__for_each_phdr(kci, p) {
+               if (!p->remaps) {
+                       p->rel = rel;
+                       rel += p->len;
+               }
+               kci->phnum += 1;
+       }
+
+       kcore_copy__for_each_phdr(kci, p) {
+               struct phdr_data *k = p->remaps;
+
+               if (k)
+                       p->rel = p->offset - k->offset + k->rel;
+       }
+}
+
 static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
                                 Elf *elf)
 {
@@ -1555,7 +1691,12 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
        if (kci->first_module && !kci->last_module_symbol)
                return -1;
 
-       return kcore_copy__read_maps(kci, elf);
+       if (kcore_copy__read_maps(kci, elf))
+               return -1;
+
+       kcore_copy__layout(kci);
+
+       return 0;
 }
 
 static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
@@ -1678,12 +1819,15 @@ int kcore_copy(const char *from_dir, const char *to_dir)
 {
        struct kcore kcore;
        struct kcore extract;
-       size_t count = 2;
        int idx = 0, err = -1;
-       off_t offset = page_size, sz, modules_offset = 0;
+       off_t offset, sz;
        struct kcore_copy_info kci = { .stext = 0, };
        char kcore_filename[PATH_MAX];
        char extract_filename[PATH_MAX];
+       struct phdr_data *p;
+
+       INIT_LIST_HEAD(&kci.phdrs);
+       INIT_LIST_HEAD(&kci.syms);
 
        if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
                return -1;
@@ -1703,20 +1847,17 @@ int kcore_copy(const char *from_dir, const char *to_dir)
        if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
                goto out_kcore_close;
 
-       if (!kci.modules_map.addr)
-               count -= 1;
-
-       if (kcore__copy_hdr(&kcore, &extract, count))
+       if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
                goto out_extract_close;
 
-       if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
-                           kci.kernel_map.len))
-               goto out_extract_close;
+       offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
+                gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
+       offset = round_up(offset, page_size);
+
+       kcore_copy__for_each_phdr(&kci, p) {
+               off_t offs = p->rel + offset;
 
-       if (kci.modules_map.addr) {
-               modules_offset = offset + kci.kernel_map.len;
-               if (kcore__add_phdr(&extract, idx, modules_offset,
-                                   kci.modules_map.addr, kci.modules_map.len))
+               if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
                        goto out_extract_close;
        }
 
@@ -1724,14 +1865,14 @@ int kcore_copy(const char *from_dir, const char *to_dir)
        if (sz < 0 || sz > offset)
                goto out_extract_close;
 
-       if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
-                      kci.kernel_map.len))
-               goto out_extract_close;
+       kcore_copy__for_each_phdr(&kci, p) {
+               off_t offs = p->rel + offset;
 
-       if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
-                                        extract.fd, modules_offset,
-                                        kci.modules_map.len))
-               goto out_extract_close;
+               if (p->remaps)
+                       continue;
+               if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
+                       goto out_extract_close;
+       }
 
        if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
                goto out_extract_close;
@@ -1754,6 +1895,9 @@ int kcore_copy(const char *from_dir, const char *to_dir)
        if (err)
                kcore_copy__unlink(to_dir, "kallsyms");
 
+       kcore_copy__free_phdrs(&kci);
+       kcore_copy__free_syms(&kci);
+
        return err;
 }
 
index ff48d0d49584cdc10969f20d892784b0a11a7e01..7119df77dc0b4da31a9df58a89b952cf155bf430 100644 (file)
@@ -288,8 +288,7 @@ void symsrc__destroy(struct symsrc *ss)
 }
 
 int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused,
-                               struct symsrc *ss __maybe_unused,
-                               struct map *map __maybe_unused)
+                               struct symsrc *ss __maybe_unused)
 {
        return 0;
 }
index 1466814ebada5d30d3eb2dad96a6345c10eee6af..8c84437f2a100d7812074f40ccc34bcb0dfb8f13 100644 (file)
@@ -5,6 +5,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <linux/kernel.h>
+#include <linux/mman.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/param.h>
@@ -70,18 +71,10 @@ static enum dso_binary_type binary_type_symtab[] = {
 
 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
 
-bool symbol_type__is_a(char symbol_type, enum map_type map_type)
+static bool symbol_type__filter(char symbol_type)
 {
        symbol_type = toupper(symbol_type);
-
-       switch (map_type) {
-       case MAP__FUNCTION:
-               return symbol_type == 'T' || symbol_type == 'W';
-       case MAP__VARIABLE:
-               return symbol_type == 'D';
-       default:
-               return false;
-       }
+       return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D';
 }
 
 static int prefix_underscores_count(const char *str)
@@ -228,9 +221,9 @@ void symbols__fixup_end(struct rb_root *symbols)
                curr->end = roundup(curr->start, 4096) + 4096;
 }
 
-void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
+void map_groups__fixup_end(struct map_groups *mg)
 {
-       struct maps *maps = &mg->maps[type];
+       struct maps *maps = &mg->maps;
        struct map *next, *curr;
 
        down_write(&maps->lock);
@@ -256,7 +249,7 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
        up_write(&maps->lock);
 }
 
-struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
+struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
 {
        size_t namelen = strlen(name) + 1;
        struct symbol *sym = calloc(1, (symbol_conf.priv_size +
@@ -274,6 +267,7 @@ struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
 
        sym->start   = start;
        sym->end     = len ? start + len : start;
+       sym->type    = type;
        sym->binding = binding;
        sym->namelen = namelen - 1;
 
@@ -484,45 +478,40 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
 
 void dso__reset_find_symbol_cache(struct dso *dso)
 {
-       enum map_type type;
-
-       for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) {
-               dso->last_find_result[type].addr   = 0;
-               dso->last_find_result[type].symbol = NULL;
-       }
+       dso->last_find_result.addr   = 0;
+       dso->last_find_result.symbol = NULL;
 }
 
-void dso__insert_symbol(struct dso *dso, enum map_type type, struct symbol *sym)
+void dso__insert_symbol(struct dso *dso, struct symbol *sym)
 {
-       __symbols__insert(&dso->symbols[type], sym, dso->kernel);
+       __symbols__insert(&dso->symbols, sym, dso->kernel);
 
        /* update the symbol cache if necessary */
-       if (dso->last_find_result[type].addr >= sym->start &&
-           (dso->last_find_result[type].addr < sym->end ||
+       if (dso->last_find_result.addr >= sym->start &&
+           (dso->last_find_result.addr < sym->end ||
            sym->start == sym->end)) {
-               dso->last_find_result[type].symbol = sym;
+               dso->last_find_result.symbol = sym;
        }
 }
 
-struct symbol *dso__find_symbol(struct dso *dso,
-                               enum map_type type, u64 addr)
+struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
 {
-       if (dso->last_find_result[type].addr != addr || dso->last_find_result[type].symbol == NULL) {
-               dso->last_find_result[type].addr   = addr;
-               dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr);
+       if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
+               dso->last_find_result.addr   = addr;
+               dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
        }
 
-       return dso->last_find_result[type].symbol;
+       return dso->last_find_result.symbol;
 }
 
-struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
+struct symbol *dso__first_symbol(struct dso *dso)
 {
-       return symbols__first(&dso->symbols[type]);
+       return symbols__first(&dso->symbols);
 }
 
-struct symbol *dso__last_symbol(struct dso *dso, enum map_type type)
+struct symbol *dso__last_symbol(struct dso *dso)
 {
-       return symbols__last(&dso->symbols[type]);
+       return symbols__last(&dso->symbols);
 }
 
 struct symbol *dso__next_symbol(struct symbol *sym)
@@ -539,24 +528,22 @@ struct symbol *symbol__next_by_name(struct symbol *sym)
 }
 
  /*
-  * Teturns first symbol that matched with @name.
+  * Returns first symbol that matched with @name.
   */
-struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
-                                       const char *name)
+struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
 {
-       struct symbol *s = symbols__find_by_name(&dso->symbol_names[type], name,
+       struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
                                                 SYMBOL_TAG_INCLUDE__NONE);
        if (!s)
-               s = symbols__find_by_name(&dso->symbol_names[type], name,
+               s = symbols__find_by_name(&dso->symbol_names, name,
                                          SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
        return s;
 }
 
-void dso__sort_by_name(struct dso *dso, enum map_type type)
+void dso__sort_by_name(struct dso *dso)
 {
-       dso__set_sorted_by_name(dso, type);
-       return symbols__sort_by_name(&dso->symbol_names[type],
-                                    &dso->symbols[type]);
+       dso__set_sorted_by_name(dso);
+       return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
 }
 
 int modules__parse(const char *filename, void *arg,
@@ -621,11 +608,6 @@ int modules__parse(const char *filename, void *arg,
        return err;
 }
 
-struct process_kallsyms_args {
-       struct map *map;
-       struct dso *dso;
-};
-
 /*
  * These are symbols in the kernel image, so make sure that
  * sym is from a kernel DSO.
@@ -661,10 +643,10 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
                                       char type, u64 start)
 {
        struct symbol *sym;
-       struct process_kallsyms_args *a = arg;
-       struct rb_root *root = &a->dso->symbols[a->map->type];
+       struct dso *dso = arg;
+       struct rb_root *root = &dso->symbols;
 
-       if (!symbol_type__is_a(type, a->map->type))
+       if (!symbol_type__filter(type))
                return 0;
 
        /*
@@ -672,7 +654,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
         * symbols, setting length to 0, and rely on
         * symbols__fixup_end() to fix it up.
         */
-       sym = symbol__new(start, 0, kallsyms2elf_binding(type), name);
+       sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
        if (sym == NULL)
                return -ENOMEM;
        /*
@@ -689,21 +671,18 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
  * so that we can in the next step set the symbol ->end address and then
  * call kernel_maps__split_kallsyms.
  */
-static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
-                                 struct map *map)
+static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
 {
-       struct process_kallsyms_args args = { .map = map, .dso = dso, };
-       return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
+       return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
 }
 
-static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
+static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
 {
-       struct map_groups *kmaps = map__kmaps(map);
        struct map *curr_map;
        struct symbol *pos;
        int count = 0;
-       struct rb_root old_root = dso->symbols[map->type];
-       struct rb_root *root = &dso->symbols[map->type];
+       struct rb_root old_root = dso->symbols;
+       struct rb_root *root = &dso->symbols;
        struct rb_node *next = rb_first(root);
 
        if (!kmaps)
@@ -723,7 +702,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
                if (module)
                        *module = '\0';
 
-               curr_map = map_groups__find(kmaps, map->type, pos->start);
+               curr_map = map_groups__find(kmaps, pos->start);
 
                if (!curr_map) {
                        symbol__delete(pos);
@@ -733,7 +712,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
                pos->start -= curr_map->start - curr_map->pgoff;
                if (pos->end)
                        pos->end -= curr_map->start - curr_map->pgoff;
-               symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
+               symbols__insert(&curr_map->dso->symbols, pos);
                ++count;
        }
 
@@ -748,22 +727,25 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
  * kernel range is broken in several maps, named [kernel].N, as we don't have
  * the original ELF section names vmlinux have.
  */
-static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
+static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
+                                     struct map *initial_map)
 {
-       struct map_groups *kmaps = map__kmaps(map);
        struct machine *machine;
-       struct map *curr_map = map;
+       struct map *curr_map = initial_map;
        struct symbol *pos;
        int count = 0, moved = 0;
-       struct rb_root *root = &dso->symbols[map->type];
+       struct rb_root *root = &dso->symbols;
        struct rb_node *next = rb_first(root);
        int kernel_range = 0;
+       bool x86_64;
 
        if (!kmaps)
                return -1;
 
        machine = kmaps->machine;
 
+       x86_64 = machine__is(machine, "x86_64");
+
        while (next) {
                char *module;
 
@@ -778,7 +760,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
                        *module++ = '\0';
 
                        if (strcmp(curr_map->dso->short_name, module)) {
-                               if (curr_map != map &&
+                               if (curr_map != initial_map &&
                                    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
                                    machine__is_default_guest(machine)) {
                                        /*
@@ -788,18 +770,16 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
                                         * symbols are in its kmap. Mark it as
                                         * loaded.
                                         */
-                                       dso__set_loaded(curr_map->dso,
-                                                       curr_map->type);
+                                       dso__set_loaded(curr_map->dso);
                                }
 
-                               curr_map = map_groups__find_by_name(kmaps,
-                                                       map->type, module);
+                               curr_map = map_groups__find_by_name(kmaps, module);
                                if (curr_map == NULL) {
                                        pr_debug("%s/proc/{kallsyms,modules} "
                                                 "inconsistency while looking "
                                                 "for \"%s\" module!\n",
                                                 machine->root_dir, module);
-                                       curr_map = map;
+                                       curr_map = initial_map;
                                        goto discard_symbol;
                                }
 
@@ -809,11 +789,21 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
                        }
                        /*
                         * So that we look just like we get from .ko files,
-                        * i.e. not prelinked, relative to map->start.
+                        * i.e. not prelinked, relative to initial_map->start.
                         */
                        pos->start = curr_map->map_ip(curr_map, pos->start);
                        pos->end   = curr_map->map_ip(curr_map, pos->end);
-               } else if (curr_map != map) {
+               } else if (x86_64 && is_entry_trampoline(pos->name)) {
+                       /*
+                        * These symbols are not needed anymore since the
+                        * trampoline maps refer to the text section and it's
+                        * symbols instead. Avoid having to deal with
+                        * relocations, and the assumption that the first symbol
+                        * is the start of kernel text, by simply removing the
+                        * symbols at this point.
+                        */
+                       goto discard_symbol;
+               } else if (curr_map != initial_map) {
                        char dso_name[PATH_MAX];
                        struct dso *ndso;
 
@@ -824,7 +814,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
                        }
 
                        if (count == 0) {
-                               curr_map = map;
+                               curr_map = initial_map;
                                goto add_symbol;
                        }
 
@@ -843,7 +833,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
 
                        ndso->kernel = dso->kernel;
 
-                       curr_map = map__new2(pos->start, ndso, map->type);
+                       curr_map = map__new2(pos->start, ndso);
                        if (curr_map == NULL) {
                                dso__put(ndso);
                                return -1;
@@ -858,9 +848,9 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
                        pos->end -= delta;
                }
 add_symbol:
-               if (curr_map != map) {
+               if (curr_map != initial_map) {
                        rb_erase(&pos->rb_node, root);
-                       symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
+                       symbols__insert(&curr_map->dso->symbols, pos);
                        ++moved;
                } else
                        ++count;
@@ -871,10 +861,10 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
                symbol__delete(pos);
        }
 
-       if (curr_map != map &&
+       if (curr_map != initial_map &&
            dso->kernel == DSO_TYPE_GUEST_KERNEL &&
            machine__is_default_guest(kmaps->machine)) {
-               dso__set_loaded(curr_map->dso, curr_map->type);
+               dso__set_loaded(curr_map->dso);
        }
 
        return count + moved;
@@ -1035,7 +1025,12 @@ int compare_proc_modules(const char *from, const char *to)
        return ret;
 }
 
-static int do_validate_kcore_modules(const char *filename, struct map *map,
+struct map *map_groups__first(struct map_groups *mg)
+{
+       return maps__first(&mg->maps);
+}
+
+static int do_validate_kcore_modules(const char *filename,
                                  struct map_groups *kmaps)
 {
        struct rb_root modules = RB_ROOT;
@@ -1046,13 +1041,12 @@ static int do_validate_kcore_modules(const char *filename, struct map *map,
        if (err)
                return err;
 
-       old_map = map_groups__first(kmaps, map->type);
+       old_map = map_groups__first(kmaps);
        while (old_map) {
                struct map *next = map_groups__next(old_map);
                struct module_info *mi;
 
-               if (old_map == map || old_map->start == map->start) {
-                       /* The kernel map */
+               if (!__map__is_kmodule(old_map)) {
                        old_map = next;
                        continue;
                }
@@ -1109,7 +1103,7 @@ static int validate_kcore_modules(const char *kallsyms_filename,
                                             kallsyms_filename))
                return -EINVAL;
 
-       if (do_validate_kcore_modules(modules_filename, map, kmaps))
+       if (do_validate_kcore_modules(modules_filename, kmaps))
                return -EINVAL;
 
        return 0;
@@ -1138,7 +1132,6 @@ static int validate_kcore_addresses(const char *kallsyms_filename,
 
 struct kcore_mapfn_data {
        struct dso *dso;
-       enum map_type type;
        struct list_head maps;
 };
 
@@ -1147,7 +1140,7 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
        struct kcore_mapfn_data *md = data;
        struct map *map;
 
-       map = map__new2(start, md->dso, md->type);
+       map = map__new2(start, md->dso);
        if (map == NULL)
                return -ENOMEM;
 
@@ -1163,13 +1156,13 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
                           const char *kallsyms_filename)
 {
        struct map_groups *kmaps = map__kmaps(map);
-       struct machine *machine;
        struct kcore_mapfn_data md;
        struct map *old_map, *new_map, *replacement_map = NULL;
+       struct machine *machine;
        bool is_64_bit;
        int err, fd;
        char kcore_filename[PATH_MAX];
-       struct symbol *sym;
+       u64 stext;
 
        if (!kmaps)
                return -EINVAL;
@@ -1177,7 +1170,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
        machine = kmaps->machine;
 
        /* This function requires that the map is the kernel map */
-       if (map != machine->vmlinux_maps[map->type])
+       if (!__map__is_kernel(map))
                return -EINVAL;
 
        if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
@@ -1189,7 +1182,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
                return -EINVAL;
 
        md.dso = dso;
-       md.type = map->type;
        INIT_LIST_HEAD(&md.maps);
 
        fd = open(kcore_filename, O_RDONLY);
@@ -1200,7 +1192,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
        }
 
        /* Read new maps into temporary lists */
-       err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
+       err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
                              &is_64_bit);
        if (err)
                goto out_err;
@@ -1212,7 +1204,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
        }
 
        /* Remove old maps */
-       old_map = map_groups__first(kmaps, map->type);
+       old_map = map_groups__first(kmaps);
        while (old_map) {
                struct map *next = map_groups__next(old_map);
 
@@ -1220,14 +1212,15 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
                        map_groups__remove(kmaps, old_map);
                old_map = next;
        }
+       machine->trampolines_mapped = false;
 
-       /* Find the kernel map using the first symbol */
-       sym = dso__first_symbol(dso, map->type);
-       list_for_each_entry(new_map, &md.maps, node) {
-               if (sym && sym->start >= new_map->start &&
-                   sym->start < new_map->end) {
-                       replacement_map = new_map;
-                       break;
+       /* Find the kernel map using the '_stext' symbol */
+       if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
+               list_for_each_entry(new_map, &md.maps, node) {
+                       if (stext >= new_map->start && stext < new_map->end) {
+                               replacement_map = new_map;
+                               break;
+                       }
                }
        }
 
@@ -1256,6 +1249,19 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
                map__put(new_map);
        }
 
+       if (machine__is(machine, "x86_64")) {
+               u64 addr;
+
+               /*
+                * If one of the corresponding symbols is there, assume the
+                * entry trampoline maps are too.
+                */
+               if (!kallsyms__get_function_start(kallsyms_filename,
+                                                 ENTRY_TRAMPOLINE_NAME,
+                                                 &addr))
+                       machine->trampolines_mapped = true;
+       }
+
        /*
         * Set the data type and long name so that kcore can be read via
         * dso__data_read_addr().
@@ -1268,7 +1274,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 
        close(fd);
 
-       if (map->type == MAP__FUNCTION)
+       if (map->prot & PROT_EXEC)
                pr_debug("Using %s for kernel object code\n", kcore_filename);
        else
                pr_debug("Using %s for kernel data\n", kcore_filename);
@@ -1289,14 +1295,10 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
  * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
  * delta based on the relocation reference symbol.
  */
-static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
+static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
 {
-       struct kmap *kmap = map__kmap(map);
        u64 addr;
 
-       if (!kmap)
-               return -1;
-
        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
                return 0;
 
@@ -1310,19 +1312,23 @@ static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
 int __dso__load_kallsyms(struct dso *dso, const char *filename,
                         struct map *map, bool no_kcore)
 {
+       struct kmap *kmap = map__kmap(map);
        u64 delta = 0;
 
        if (symbol__restricted_filename(filename, "/proc/kallsyms"))
                return -1;
 
-       if (dso__load_all_kallsyms(dso, filename, map) < 0)
+       if (!kmap || !kmap->kmaps)
                return -1;
 
-       if (kallsyms__delta(map, filename, &delta))
+       if (dso__load_all_kallsyms(dso, filename) < 0)
                return -1;
 
-       symbols__fixup_end(&dso->symbols[map->type]);
-       symbols__fixup_duplicate(&dso->symbols[map->type]);
+       if (kallsyms__delta(kmap, filename, &delta))
+               return -1;
+
+       symbols__fixup_end(&dso->symbols);
+       symbols__fixup_duplicate(&dso->symbols);
 
        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
                dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
@@ -1330,9 +1336,9 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
                dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
 
        if (!no_kcore && !dso__load_kcore(dso, map, filename))
-               return dso__split_kallsyms_for_kcore(dso, map);
+               return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
        else
-               return dso__split_kallsyms(dso, map, delta);
+               return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
 }
 
 int dso__load_kallsyms(struct dso *dso, const char *filename,
@@ -1341,8 +1347,7 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
        return __dso__load_kallsyms(dso, filename, map, false);
 }
 
-static int dso__load_perf_map(const char *map_path, struct dso *dso,
-                             struct map *map)
+static int dso__load_perf_map(const char *map_path, struct dso *dso)
 {
        char *line = NULL;
        size_t n;
@@ -1379,12 +1384,12 @@ static int dso__load_perf_map(const char *map_path, struct dso *dso,
                if (len + 2 >= line_len)
                        continue;
 
-               sym = symbol__new(start, size, STB_GLOBAL, line + len);
+               sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
 
                if (sym == NULL)
                        goto out_delete_line;
 
-               symbols__insert(&dso->symbols[map->type], sym);
+               symbols__insert(&dso->symbols, sym);
                nr_syms++;
        }
 
@@ -1509,25 +1514,27 @@ int dso__load(struct dso *dso, struct map *map)
        pthread_mutex_lock(&dso->lock);
 
        /* check again under the dso->lock */
-       if (dso__loaded(dso, map->type)) {
+       if (dso__loaded(dso)) {
                ret = 1;
                goto out;
        }
 
+       if (map->groups && map->groups->machine)
+               machine = map->groups->machine;
+       else
+               machine = NULL;
+
        if (dso->kernel) {
                if (dso->kernel == DSO_TYPE_KERNEL)
                        ret = dso__load_kernel_sym(dso, map);
                else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
                        ret = dso__load_guest_kernel_sym(dso, map);
 
+               if (machine__is(machine, "x86_64"))
+                       machine__map_x86_64_entry_trampolines(machine, dso);
                goto out;
        }
 
-       if (map->groups && map->groups->machine)
-               machine = map->groups->machine;
-       else
-               machine = NULL;
-
        dso->adjust_symbols = 0;
 
        if (perfmap) {
@@ -1542,7 +1549,7 @@ int dso__load(struct dso *dso, struct map *map)
                        goto out;
                }
 
-               ret = dso__load_perf_map(map_path, dso, map);
+               ret = dso__load_perf_map(map_path, dso);
                dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
                                             DSO_BINARY_TYPE__NOT_FOUND;
                goto out;
@@ -1651,7 +1658,7 @@ int dso__load(struct dso *dso, struct map *map)
        if (ret > 0) {
                int nr_plt;
 
-               nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map);
+               nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
                if (nr_plt > 0)
                        ret += nr_plt;
        }
@@ -1663,17 +1670,16 @@ int dso__load(struct dso *dso, struct map *map)
        if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
                ret = 0;
 out:
-       dso__set_loaded(dso, map->type);
+       dso__set_loaded(dso);
        pthread_mutex_unlock(&dso->lock);
        nsinfo__mountns_exit(&nsc);
 
        return ret;
 }
 
-struct map *map_groups__find_by_name(struct map_groups *mg,
-                                    enum map_type type, const char *name)
+struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
 {
-       struct maps *maps = &mg->maps[type];
+       struct maps *maps = &mg->maps;
        struct map *map;
 
        down_read(&maps->lock);
@@ -1720,7 +1726,7 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
                else
                        dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
                dso__set_long_name(dso, vmlinux, vmlinux_allocated);
-               dso__set_loaded(dso, map->type);
+               dso__set_loaded(dso);
                pr_debug("Using %s for symbols\n", symfs_vmlinux);
        }
 
index 70c16741f50a3b94e02f29d674e6d2856710997f..1a16438eb3cea6de7714aca6a37142b5b9468122 100644 (file)
@@ -57,7 +57,8 @@ struct symbol {
        u64             start;
        u64             end;
        u16             namelen;
-       u8              binding;
+       u8              type:4;
+       u8              binding:4;
        u8              idle:1;
        u8              ignore:1;
        u8              inlined:1;
@@ -259,17 +260,16 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
                         bool no_kcore);
 int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map);
 
-void dso__insert_symbol(struct dso *dso, enum map_type type,
+void dso__insert_symbol(struct dso *dso,
                        struct symbol *sym);
 
-struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
-                               u64 addr);
-struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
-                                       const char *name);
+struct symbol *dso__find_symbol(struct dso *dso, u64 addr);
+struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name);
+
 struct symbol *symbol__next_by_name(struct symbol *sym);
 
-struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
-struct symbol *dso__last_symbol(struct dso *dso, enum map_type type);
+struct symbol *dso__first_symbol(struct dso *dso);
+struct symbol *dso__last_symbol(struct dso *dso);
 struct symbol *dso__next_symbol(struct symbol *sym);
 
 enum dso_type dso__type_fd(int fd);
@@ -288,7 +288,7 @@ void symbol__exit(void);
 void symbol__elf_init(void);
 int symbol__annotation_init(void);
 
-struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name);
+struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name);
 size_t __symbol__fprintf_symname_offs(const struct symbol *sym,
                                      const struct addr_location *al,
                                      bool unknown_as_addr,
@@ -300,7 +300,6 @@ size_t __symbol__fprintf_symname(const struct symbol *sym,
                                 bool unknown_as_addr, FILE *fp);
 size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
 size_t symbol__fprintf(struct symbol *sym, FILE *fp);
-bool symbol_type__is_a(char symbol_type, enum map_type map_type);
 bool symbol__restricted_filename(const char *filename,
                                 const char *restricted_filename);
 int symbol__config_symfs(const struct option *opt __maybe_unused,
@@ -308,8 +307,7 @@ int symbol__config_symfs(const struct option *opt __maybe_unused,
 
 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
                  struct symsrc *runtime_ss, int kmodule);
-int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss,
-                               struct map *map);
+int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss);
 
 char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name);
 
@@ -317,7 +315,7 @@ void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
 void symbols__insert(struct rb_root *symbols, struct symbol *sym);
 void symbols__fixup_duplicate(struct rb_root *symbols);
 void symbols__fixup_end(struct rb_root *symbols);
-void __map_groups__fixup_end(struct map_groups *mg, enum map_type type);
+void map_groups__fixup_end(struct map_groups *mg);
 
 typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
index 6dd2cb88ccbeab83286bf709c9a0146b0aa42bf3..ed0205cc794263e71a741fd64a79f612245c8e52 100644 (file)
@@ -58,13 +58,13 @@ size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
 }
 
 size_t dso__fprintf_symbols_by_name(struct dso *dso,
-                                   enum map_type type, FILE *fp)
+                                   FILE *fp)
 {
        size_t ret = 0;
        struct rb_node *nd;
        struct symbol_name_rb_node *pos;
 
-       for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&dso->symbol_names); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
                fprintf(fp, "%s\n", pos->sym.name);
        }
index 68b65b10579bcce74ef5cdb99b8a1b43501ee88b..2048d393ece6f24b19f896e75173420f47d22e48 100644 (file)
@@ -302,23 +302,20 @@ int thread__insert_map(struct thread *thread, struct map *map)
 static int __thread__prepare_access(struct thread *thread)
 {
        bool initialized = false;
-       int i, err = 0;
-
-       for (i = 0; i < MAP__NR_TYPES; ++i) {
-               struct maps *maps = &thread->mg->maps[i];
-               struct map *map;
+       int err = 0;
+       struct maps *maps = &thread->mg->maps;
+       struct map *map;
 
-               down_read(&maps->lock);
+       down_read(&maps->lock);
 
-               for (map = maps__first(maps); map; map = map__next(map)) {
-                       err = unwind__prepare_access(thread, map, &initialized);
-                       if (err || initialized)
-                               break;
-               }
-
-               up_read(&maps->lock);
+       for (map = maps__first(maps); map; map = map__next(map)) {
+               err = unwind__prepare_access(thread, map, &initialized);
+               if (err || initialized)
+                       break;
        }
 
+       up_read(&maps->lock);
+
        return err;
 }
 
@@ -335,8 +332,6 @@ static int thread__prepare_access(struct thread *thread)
 static int thread__clone_map_groups(struct thread *thread,
                                    struct thread *parent)
 {
-       int i;
-
        /* This is new thread, we share map groups for process. */
        if (thread->pid_ == parent->pid_)
                return thread__prepare_access(thread);
@@ -348,9 +343,8 @@ static int thread__clone_map_groups(struct thread *thread,
        }
 
        /* But this one is new process, copy maps. */
-       for (i = 0; i < MAP__NR_TYPES; ++i)
-               if (map_groups__clone(thread, parent->mg, i) < 0)
-                       return -ENOMEM;
+       if (map_groups__clone(thread, parent->mg) < 0)
+               return -ENOMEM;
 
        return 0;
 }
@@ -371,8 +365,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
        return thread__clone_map_groups(thread, parent);
 }
 
-void thread__find_cpumode_addr_location(struct thread *thread,
-                                       enum map_type type, u64 addr,
+void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
                                        struct addr_location *al)
 {
        size_t i;
@@ -384,7 +377,7 @@ void thread__find_cpumode_addr_location(struct thread *thread,
        };
 
        for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
-               thread__find_addr_location(thread, cpumodes[i], type, addr, al);
+               thread__find_symbol(thread, cpumodes[i], addr, al);
                if (al->map)
                        break;
        }
index 14d44c3235b8982a8fb9bf0eebb0ac85224ef017..07606aa6998d92252b7d63a4632750d9531356f2 100644 (file)
@@ -92,16 +92,13 @@ size_t thread__fprintf(struct thread *thread, FILE *fp);
 
 struct thread *thread__main_thread(struct machine *machine, struct thread *thread);
 
-void thread__find_addr_map(struct thread *thread,
-                          u8 cpumode, enum map_type type, u64 addr,
-                          struct addr_location *al);
+struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
+                            struct addr_location *al);
 
-void thread__find_addr_location(struct thread *thread,
-                               u8 cpumode, enum map_type type, u64 addr,
-                               struct addr_location *al);
+struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
+                                  u64 addr, struct addr_location *al);
 
-void thread__find_cpumode_addr_location(struct thread *thread,
-                                       enum map_type type, u64 addr,
+void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
                                        struct addr_location *al);
 
 static inline void *thread__priv(struct thread *thread)
index d7f2113462fbb97b3fb9e44294cd1be732831a45..c85d0d1a65ed72ffbdf2195004f0cd106602dde7 100644 (file)
@@ -103,11 +103,10 @@ static int record_file(const char *file, ssize_t hdr_sz)
 
 static int record_header_files(void)
 {
-       char *path;
+       char *path = get_events_file("header_page");
        struct stat st;
        int err = -EIO;
 
-       path = get_tracing_file("events/header_page");
        if (!path) {
                pr_debug("can't get tracing/events/header_page");
                return -ENOMEM;
@@ -128,9 +127,9 @@ static int record_header_files(void)
                goto out;
        }
 
-       put_tracing_file(path);
+       put_events_file(path);
 
-       path = get_tracing_file("events/header_event");
+       path = get_events_file("header_event");
        if (!path) {
                pr_debug("can't get tracing/events/header_event");
                err = -ENOMEM;
@@ -154,7 +153,7 @@ static int record_header_files(void)
 
        err = 0;
 out:
-       put_tracing_file(path);
+       put_events_file(path);
        return err;
 }
 
@@ -243,7 +242,7 @@ static int record_ftrace_files(struct tracepoint_path *tps)
        char *path;
        int ret;
 
-       path = get_tracing_file("events/ftrace");
+       path = get_events_file("ftrace");
        if (!path) {
                pr_debug("can't get tracing/events/ftrace");
                return -ENOMEM;
index 16a776371d03228cf68c6fdcd30d5acd7928cf7e..1aa3686032688f95609274918aab97bd7850e14b 100644 (file)
@@ -75,6 +75,7 @@ void trace_event__cleanup(struct trace_event *t)
 static struct event_format*
 tp_format(const char *sys, const char *name)
 {
+       char *tp_dir = get_events_file(sys);
        struct pevent *pevent = tevent.pevent;
        struct event_format *event = NULL;
        char path[PATH_MAX];
@@ -82,8 +83,11 @@ tp_format(const char *sys, const char *name)
        char *data;
        int err;
 
-       scnprintf(path, PATH_MAX, "%s/%s/%s/format",
-                 tracing_events_path, sys, name);
+       if (!tp_dir)
+               return ERR_PTR(-errno);
+
+       scnprintf(path, PATH_MAX, "%s/%s/format", tp_dir, name);
+       put_events_file(tp_dir);
 
        err = filename__read_str(path, &data, &size);
        if (err)
index 7bdd239c795c16f71ea5a25bf4e6085060feacbd..538db4e5d1e69c733edb0810d82d2b89206da2de 100644 (file)
@@ -28,10 +28,11 @@ static int __report_module(struct addr_location *al, u64 ip,
 {
        Dwfl_Module *mod;
        struct dso *dso = NULL;
-
-       thread__find_addr_location(ui->thread,
-                                  PERF_RECORD_MISC_USER,
-                                  MAP__FUNCTION, ip, al);
+       /*
+        * Some callers will use al->sym, so we can't just use the
+        * cheaper thread__find_map() here.
+        */
+       thread__find_symbol(ui->thread, PERF_RECORD_MISC_USER, ip, al);
 
        if (al->map)
                dso = al->map->dso;
@@ -103,19 +104,7 @@ static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr,
        struct addr_location al;
        ssize_t size;
 
-       thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
-                             MAP__FUNCTION, addr, &al);
-       if (!al.map) {
-               /*
-                * We've seen cases (softice) where DWARF unwinder went
-                * through non executable mmaps, which we need to lookup
-                * in MAP__VARIABLE tree.
-                */
-               thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
-                                     MAP__VARIABLE, addr, &al);
-       }
-
-       if (!al.map) {
+       if (!thread__find_map(ui->thread, PERF_RECORD_MISC_USER, addr, &al)) {
                pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
                return -1;
        }
index af873044d33a234e3f4237cda3d8f90943534a9b..6a11bc7e6b27f68780b93aebcba5cae0ecdf18ec 100644 (file)
@@ -366,19 +366,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
 static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
 {
        struct addr_location al;
-
-       thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
-                             MAP__FUNCTION, ip, &al);
-       if (!al.map) {
-               /*
-                * We've seen cases (softice) where DWARF unwinder went
-                * through non executable mmaps, which we need to lookup
-                * in MAP__VARIABLE tree.
-                */
-               thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
-                                     MAP__VARIABLE, ip, &al);
-       }
-       return al.map;
+       return thread__find_map(ui->thread, PERF_RECORD_MISC_USER, ip, &al);
 }
 
 static int
@@ -586,12 +574,9 @@ static int entry(u64 ip, struct thread *thread,
        struct unwind_entry e;
        struct addr_location al;
 
-       thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
-                                  MAP__FUNCTION, ip, &al);
-
+       e.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
        e.ip = al.addr;
        e.map = al.map;
-       e.sym = al.sym;
 
        pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
                 al.sym ? al.sym->name : "''",
index 1019bbc5dbd8a00ffb98fa90080fe8881f4d8a78..eac5b858a3716426be8e900bd6587e54b860cd2c 100644 (file)
@@ -38,11 +38,43 @@ void perf_set_multithreaded(void)
 }
 
 unsigned int page_size;
-int cacheline_size;
+
+#ifdef _SC_LEVEL1_DCACHE_LINESIZE
+#define cache_line_size(cacheline_sizep) *cacheline_sizep = sysconf(_SC_LEVEL1_DCACHE_LINESIZE)
+#else
+static void cache_line_size(int *cacheline_sizep)
+{
+       if (sysfs__read_int("devices/system/cpu/cpu0/cache/index0/coherency_line_size", cacheline_sizep))
+               pr_debug("cannot determine cache line size");
+}
+#endif
+
+int cacheline_size(void)
+{
+       static int size;
+
+       if (!size)
+               cache_line_size(&size);
+
+       return size;
+}
 
 int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
 int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
 
+int sysctl__max_stack(void)
+{
+       int value;
+
+       if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
+               sysctl_perf_event_max_stack = value;
+
+       if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
+               sysctl_perf_event_max_contexts_per_stack = value;
+
+       return sysctl_perf_event_max_stack;
+}
+
 bool test_attr__enabled;
 
 bool perf_host  = true;
index c9626c20620890ba689faa2207d6ade3f4433874..dc58254a2b696a9ee38c384845481632ab25c236 100644 (file)
@@ -43,7 +43,9 @@ size_t hex_width(u64 v);
 int hex2u64(const char *ptr, u64 *val);
 
 extern unsigned int page_size;
-extern int cacheline_size;
+int __pure cacheline_size(void);
+
+int sysctl__max_stack(void);
 
 int fetch_kernel_version(unsigned int *puint,
                         char *str, size_t str_sz);
index 0acb1ec0e2f08c0ead7aa14835725b3a20f2fd76..741af209b19d65283f8ce130d6104c233864dac1 100644 (file)
@@ -139,12 +139,10 @@ static enum dso_type machine__thread_dso_type(struct machine *machine,
                                              struct thread *thread)
 {
        enum dso_type dso_type = DSO__TYPE_UNKNOWN;
-       struct map *map;
-       struct dso *dso;
+       struct map *map = map_groups__first(thread->mg);
 
-       map = map_groups__first(thread->mg, MAP__FUNCTION);
        for (; map ; map = map_groups__next(map)) {
-               dso = map->dso;
+               struct dso *dso = map->dso;
                if (!dso || dso->long_name[0] != '/')
                        continue;
                dso_type = dso__type(dso, machine);