perf tools: Preserve eBPF maps when loading kcore
author    Jiri Olsa <jolsa@kernel.org>
          Wed, 8 May 2019 13:20:06 +0000 (15:20 +0200)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
          Tue, 28 May 2019 21:37:42 +0000 (18:37 -0300)
We need to preserve eBPF maps even if they are covered by kcore, because
we need to access the eBPF dso for source data.

Add the map_groups__merge_in function to do that.  It merges a map into
map_groups by splitting the new map within the existing map regions.
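
To make the splitting concrete, here is a minimal standalone sketch of
the overlap cases the new function handles. It uses plain ranges instead
of perf's struct map; the names (struct range, split_around) are
hypothetical and for illustration only:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Hypothetical stand-in for perf's struct map. */
struct range { uint64_t start, end; };

/*
 * Mirror the overlap cases of map_groups__merge_in(): split the
 * incoming "new" range around one existing "old" range, storing the
 * surviving pieces of "new" in out[] and returning their count.
 */
static int split_around(struct range new, struct range old,
                        struct range out[2])
{
        int n = 0;

        if (new.end < old.start || new.start >= old.end) {
                out[n++] = new;                 /* no overlap, keep as-is */
        } else if (new.start < old.start) {
                out[n++] = (struct range){ new.start, old.start };
                if (new.end >= old.end)         /* old splits new in two */
                        out[n++] = (struct range){ old.end, new.end };
        } else if (new.end >= old.end) {
                out[n++] = (struct range){ old.end, new.end };
        }
        /* else: new is fully covered by old and is dropped */
        return n;
}

int main(void)
{
        /* a kcore map fully covering an eBPF program map */
        struct range kcore = { 0x1000, 0x9000 };
        struct range bpf   = { 0x4000, 0x5000 };
        struct range out[2];
        int i, n = split_around(kcore, bpf, out);

        for (i = 0; i < n; i++)
                printf("kcore piece: [%#" PRIx64 ", %#" PRIx64 ")\n",
                       out[i].start, out[i].end);
        return 0;
}

Fed a kcore range that fully covers an eBPF program range, this yields
the two surrounding kcore pieces while the covered range itself is left
untouched, which is exactly what keeps the eBPF dso reachable.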

Suggested-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stanislav Fomichev <sdf@google.com>
Link: http://lkml.kernel.org/r/20190508132010.14512-9-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 5cbad55cd99dfbee9729b64331ea384da46cb2a8..29780fcd049c9b4d2d6c4ac3942ccf5c082db12a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1166,6 +1166,85 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
        return 0;
 }
 
+/*
+ * Merges map into map_groups by splitting the new map
+ * within the existing map regions.
+ */
+static int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
+{
+       struct map *old_map;
+       LIST_HEAD(merged);
+
+       for (old_map = map_groups__first(kmaps); old_map;
+            old_map = map_groups__next(old_map)) {
+
+               /* no overlap with this one */
+               if (new_map->end < old_map->start ||
+                   new_map->start >= old_map->end)
+                       continue;
+
+               if (new_map->start < old_map->start) {
+                       /*
+                        * |new......
+                        *       |old....
+                        */
+                       if (new_map->end < old_map->end) {
+                               /*
+                                * |new......|     -> |new..|
+                                *       |old....| ->       |old....|
+                                */
+                               new_map->end = old_map->start;
+                       } else {
+                               /*
+                                * |new.............| -> |new..|       |new..|
+                                *       |old....|    ->       |old....|
+                                */
+                               struct map *m = map__clone(new_map);
+
+                               if (!m)
+                                       return -ENOMEM;
+
+                               m->end = old_map->start;
+                               list_add_tail(&m->node, &merged);
+                               new_map->start = old_map->end;
+                       }
+               } else {
+                       /*
+                        *      |new......
+                        * |old....
+                        */
+                       if (new_map->end < old_map->end) {
+                               /*
+                                *      |new..|   -> x
+                                * |old.........| -> |old.........|
+                                */
+                               map__put(new_map);
+                               new_map = NULL;
+                               break;
+                       } else {
+                               /*
+                                *      |new......| ->         |new...|
+                                * |old....|        -> |old....|
+                                */
+                               new_map->start = old_map->end;
+                       }
+               }
+       }
+
+       while (!list_empty(&merged)) {
+               old_map = list_entry(merged.next, struct map, node);
+               list_del_init(&old_map->node);
+               map_groups__insert(kmaps, old_map);
+               map__put(old_map);
+       }
+
+       if (new_map) {
+               map_groups__insert(kmaps, new_map);
+               map__put(new_map);
+       }
+       return 0;
+}
+
 static int dso__load_kcore(struct dso *dso, struct map *map,
                           const char *kallsyms_filename)
 {
@@ -1222,7 +1301,12 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
        while (old_map) {
                struct map *next = map_groups__next(old_map);
 
-               if (old_map != map)
+               /*
+                * We need to preserve eBPF maps even if they are
+                * covered by kcore, because we need to access
+                * eBPF dso for source data.
+                */
+               if (old_map != map && !__map__is_bpf_prog(old_map))
                        map_groups__remove(kmaps, old_map);
                old_map = next;
        }
@@ -1256,11 +1340,16 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
                        map_groups__remove(kmaps, map);
                        map_groups__insert(kmaps, map);
                        map__put(map);
+                       map__put(new_map);
                } else {
-                       map_groups__insert(kmaps, new_map);
+                       /*
+                        * Merge kcore map into existing maps,
+                        * and ensure that current maps (eBPF)
+                        * stay intact.
+                        */
+                       if (map_groups__merge_in(kmaps, new_map))
+                               goto out_err;
                }
-
-               map__put(new_map);
        }
 
        if (machine__is(machine, "x86_64")) {
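
A detail worth noting in the last hunk: the unconditional map__put(new_map)
disappears because responsibility for the reference moves into the callee.
In the first branch, where the main kernel map is updated in place, the
caller now puts new_map explicitly, and in the merge branch
map_groups__merge_in() consumes the reference itself, either handing
new_map (or its clones) to the map_groups via map_groups__insert() plus
map__put(), or dropping it outright when the range is fully covered. A
minimal sketch of that "callee consumes the reference" convention, using
a hypothetical refcounted object in place of struct map:

#include <stdlib.h>

struct obj { int refcnt; };

static struct obj *obj_get(struct obj *o) { o->refcnt++; return o; }

static void obj_put(struct obj *o)
{
        if (o && --o->refcnt == 0)
                free(o);
}

/*
 * Always consumes the caller's reference: the container takes its own
 * reference when the object is kept, and the caller's reference is
 * dropped either way.
 */
static int merge_in(struct obj **slot, struct obj *o, int keep)
{
        if (keep)
                *slot = obj_get(o);
        obj_put(o);
        return 0;
}

int main(void)
{
        struct obj *slot = NULL;
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->refcnt = 1;
        merge_in(&slot, o, 1);  /* caller does not put o afterwards */
        if (slot)
                obj_put(slot);  /* container drops its own reference later */
        return 0;
}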