bpf: perf event change needed for subsequent bpf helpers
author    Yonghong Song <yhs@fb.com>
          Thu, 5 Oct 2017 16:19:19 +0000 (09:19 -0700)
committer David S. Miller <davem@davemloft.net>
          Sat, 7 Oct 2017 22:05:57 +0000 (23:05 +0100)
This patch does not impact existing functionality.
It contains the changes in the perf event area needed for
the subsequent bpf_perf_event_read_value and
bpf_perf_prog_read_value helpers.

Signed-off-by: Yonghong Song <yhs@fb.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
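
For context, the follow-up helpers named in the commit message are expected to return the raw counter value together with its enabled and running times in one structure. The sketch below shows that rough shape; the struct is taken from the later UAPI patches and is not part of this commit (__u64 comes from <linux/types.h>).

/* Assumed shape filled in by the follow-up helpers (not in this commit):
 * counter is the raw count; enabled/running let callers scale multiplexed
 * counters as counter * enabled / running. */
struct bpf_perf_event_value {
        __u64 counter;
        __u64 enabled;
        __u64 running;
};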
include/linux/perf_event.h
kernel/bpf/arraymap.c
kernel/events/core.c
kernel/trace/bpf_trace.c

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8e22f24ded6a3ad0e2accd96fe781be16c1987e4..79b18a20cf5d77abfa1c63d4d51327429c599583 100644
@@ -806,6 +806,7 @@ struct perf_output_handle {
 struct bpf_perf_event_data_kern {
        struct pt_regs *regs;
        struct perf_sample_data *data;
+       struct perf_event *event;
 };
 
 #ifdef CONFIG_CGROUP_PERF
@@ -884,7 +885,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                void *context);
 extern void perf_pmu_migrate_context(struct pmu *pmu,
                                int src_cpu, int dst_cpu);
-int perf_event_read_local(struct perf_event *event, u64 *value);
+int perf_event_read_local(struct perf_event *event, u64 *value,
+                         u64 *enabled, u64 *running);
 extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
 
@@ -1286,7 +1288,8 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *
 {
        return ERR_PTR(-EINVAL);
 }
-static inline int perf_event_read_local(struct perf_event *event, u64 *value)
+static inline int perf_event_read_local(struct perf_event *event, u64 *value,
+                                       u64 *enabled, u64 *running)
 {
        return -EINVAL;
 }
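
With the widened signature above, existing callers that only need the raw count pass NULL for the two new arguments (as the arraymap.c and bpf_trace.c hunks below do). A caller that does want the timing data would look roughly like this sketch; the wrapper function and pr_debug output are illustrative, only perf_event_read_local()'s new signature comes from this patch.

#include <linux/perf_event.h>
#include <linux/printk.h>

/* Sketch: read the counter plus its enabled/running times in one call. */
static void show_scaled_count(struct perf_event *event)
{
        u64 value, enabled, running;

        if (!perf_event_read_local(event, &value, &enabled, &running))
                pr_debug("count=%llu enabled=%llu running=%llu\n",
                         value, enabled, running);
}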
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 98c0f00c3f5e05007287de1a1636c4e6722beed7..68d866628be0d69cd81ce7340ea382c378218641 100644
@@ -492,7 +492,7 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
 
        ee = ERR_PTR(-EOPNOTSUPP);
        event = perf_file->private_data;
-       if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
+       if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
                goto err_out;
 
        ee = bpf_event_entry_gen(perf_file, map_file);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6bc21e202ae40d36ceb9fa23d3c78785cad957da..902149f053811945d5439bbd6ca946ce08841520 100644
@@ -3684,10 +3684,12 @@ static inline u64 perf_event_count(struct perf_event *event)
  *     will not be local and we cannot read them atomically
  *   - must not have a pmu::count method
  */
-int perf_event_read_local(struct perf_event *event, u64 *value)
+int perf_event_read_local(struct perf_event *event, u64 *value,
+                         u64 *enabled, u64 *running)
 {
        unsigned long flags;
        int ret = 0;
+       u64 now;
 
        /*
         * Disabling interrupts avoids all counter scheduling (context
@@ -3718,13 +3720,21 @@ int perf_event_read_local(struct perf_event *event, u64 *value)
                goto out;
        }
 
+       now = event->shadow_ctx_time + perf_clock();
+       if (enabled)
+               *enabled = now - event->tstamp_enabled;
        /*
         * If the event is currently on this CPU, its either a per-task event,
         * or local to this CPU. Furthermore it means its ACTIVE (otherwise
         * oncpu == -1).
         */
-       if (event->oncpu == smp_processor_id())
+       if (event->oncpu == smp_processor_id()) {
                event->pmu->read(event);
+               if (running)
+                       *running = now - event->tstamp_running;
+       } else if (running) {
+               *running = event->total_time_running;
+       }
 
        *value = local64_read(&event->count);
 out:
@@ -8072,6 +8082,7 @@ static void bpf_overflow_handler(struct perf_event *event,
        struct bpf_perf_event_data_kern ctx = {
                .data = data,
                .regs = regs,
+               .event = event,
        };
        int ret = 0;
 
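
The event pointer stashed in bpf_perf_event_data_kern above is what lets a program attached as an overflow handler get back to its own counter. A hypothetical helper body built on this plumbing could look like the sketch below; the wrapper name is illustrative, only the new struct field and the widened perf_event_read_local() come from this patch.

#include <linux/perf_event.h>

/* Hypothetical sketch: read the counter backing the overflowing event via
 * the pointer stashed by bpf_overflow_handler(). */
static int read_overflowing_counter(struct bpf_perf_event_data_kern *ctx,
                                    u64 *counter, u64 *enabled, u64 *running)
{
        return perf_event_read_local(ctx->event, counter, enabled, running);
}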
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index dc498b605d5dd36137eaba7bd0ee93da72a36c33..95888ae6c2634b9e126000cbd009a7f5825b1309 100644
@@ -275,7 +275,7 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
        if (!ee)
                return -ENOENT;
 
-       err = perf_event_read_local(ee->event, &value);
+       err = perf_event_read_local(ee->event, &value, NULL, NULL);
        /*
         * this api is ugly since we miss [-22..-2] range of valid
         * counter values, but that's uapi
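
On the BPF program side, once the follow-up helper lands, a program could read a counter from a BPF_MAP_TYPE_PERF_EVENT_ARRAY together with its enabled/running times and scale for multiplexing. The sketch below assumes the eventual bpf_perf_event_read_value() signature and the samples/bpf-style bpf_helpers.h declarations; the attach point and map layout are illustrative and nothing here is part of this commit.

#include <uapi/linux/bpf.h>
#include <linux/ptrace.h>
#include "bpf_helpers.h"

/* Assumed map: one hardware counter per CPU, populated from user space. */
struct bpf_map_def SEC("maps") counters = {
        .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
        .key_size = sizeof(int),
        .value_size = sizeof(__u32),
        .max_entries = 64,
};

SEC("kprobe/sys_write")         /* attach point is purely illustrative */
int read_counter(struct pt_regs *ctx)
{
        struct bpf_perf_event_value v = {};

        /* Assumed follow-up helper: one call returns counter, enabled and
         * running, so the program can scale multiplexed counters as
         * counter * enabled / running. */
        if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
                                      &v, sizeof(v)))
                return 0;

        return 0;
}

char _license[] SEC("license") = "GPL";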