1 /*
2  * bpf-loader.c
3  *
4  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
5  * Copyright (C) 2015 Huawei Inc.
6  */
7
8 #include <linux/bpf.h>
9 #include <bpf/libbpf.h>
10 #include <bpf/bpf.h>
11 #include <linux/err.h>
12 #include <linux/string.h>
13 #include "perf.h"
14 #include "debug.h"
15 #include "bpf-loader.h"
16 #include "bpf-prologue.h"
17 #include "probe-event.h"
18 #include "probe-finder.h" // for MAX_PROBES
19 #include "parse-events.h"
20 #include "llvm-utils.h"
21 #include "c++/clang-c.h"
22
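/*
 * Route libbpf's warning/info/debug output through perf's veprintf()
 * so it honours the global 'verbose' level and the pr_fmt() prefix.
 */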
23 #define DEFINE_PRINT_FN(name, level) \
24 static int libbpf_##name(const char *fmt, ...)  \
25 {                                               \
26         va_list args;                           \
27         int ret;                                \
28                                                 \
29         va_start(args, fmt);                    \
30         ret = veprintf(level, verbose, pr_fmt(fmt), args);\
31         va_end(args);                           \
32         return ret;                             \
33 }
34
35 DEFINE_PRINT_FN(warning, 1)
36 DEFINE_PRINT_FN(info, 1)
37 DEFINE_PRINT_FN(debug, 1)
38
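/*
 * Per-program private data attached to each bpf_program: either a raw
 * tracepoint (is_tp with sys_name/evt_name) or a perf probe event (pev),
 * plus the state needed for prologue generation (instruction buffer and
 * the tev -> prologue type mapping).
 */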
39 struct bpf_prog_priv {
40         bool is_tp;
41         char *sys_name;
42         char *evt_name;
43         struct perf_probe_event pev;
44         bool need_prologue;
45         struct bpf_insn *insns_buf;
46         int nr_types;
47         int *type_mapping;
48 };
49
50 static bool libbpf_initialized;
51
52 struct bpf_object *
53 bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
54 {
55         struct bpf_object *obj;
56
57         if (!libbpf_initialized) {
58                 libbpf_set_print(libbpf_warning,
59                                  libbpf_info,
60                                  libbpf_debug);
61                 libbpf_initialized = true;
62         }
63
64         obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
65         if (IS_ERR(obj)) {
66                 pr_debug("bpf: failed to load buffer\n");
67                 return ERR_PTR(-EINVAL);
68         }
69
70         return obj;
71 }
72
73 struct bpf_object *bpf__prepare_load(const char *filename, bool source)
74 {
75         struct bpf_object *obj;
76
77         if (!libbpf_initialized) {
78                 libbpf_set_print(libbpf_warning,
79                                  libbpf_info,
80                                  libbpf_debug);
81                 libbpf_initialized = true;
82         }
83
84         if (source) {
85                 int err;
86                 void *obj_buf;
87                 size_t obj_buf_sz;
88
89                 perf_clang__init();
90                 err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
91                 perf_clang__cleanup();
92                 if (err) {
93                         pr_warning("bpf: builtin compilation failed: %d, try external compiler\n", err);
94                         err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
95                         if (err)
96                                 return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
97                 } else
98                         pr_debug("bpf: successful builtin compilation\n");
99                 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
100
101                 if (!IS_ERR(obj) && llvm_param.dump_obj)
102                         llvm__dump_obj(filename, obj_buf, obj_buf_sz);
103
104                 free(obj_buf);
105         } else
106                 obj = bpf_object__open(filename);
107
108         if (IS_ERR(obj)) {
109                 pr_debug("bpf: failed to load %s\n", filename);
110                 return obj;
111         }
112
113         return obj;
114 }
115
116 void bpf__clear(void)
117 {
118         struct bpf_object *obj, *tmp;
119
120         bpf_object__for_each_safe(obj, tmp) {
121                 bpf__unprobe(obj);
122                 bpf_object__close(obj);
123         }
124 }
125
126 static void
127 clear_prog_priv(struct bpf_program *prog __maybe_unused,
128                 void *_priv)
129 {
130         struct bpf_prog_priv *priv = _priv;
131
132         cleanup_perf_probe_events(&priv->pev, 1);
133         zfree(&priv->insns_buf);
134         zfree(&priv->type_mapping);
135         zfree(&priv->sys_name);
136         zfree(&priv->evt_name);
137         free(priv);
138 }
139
140 static int
141 prog_config__exec(const char *value, struct perf_probe_event *pev)
142 {
143         pev->uprobes = true;
144         pev->target = strdup(value);
145         if (!pev->target)
146                 return -ENOMEM;
147         return 0;
148 }
149
150 static int
151 prog_config__module(const char *value, struct perf_probe_event *pev)
152 {
153         pev->uprobes = false;
154         pev->target = strdup(value);
155         if (!pev->target)
156                 return -ENOMEM;
157         return 0;
158 }
159
160 static int
161 prog_config__bool(const char *value, bool *pbool, bool invert)
162 {
163         int err;
164         bool bool_value;
165
166         if (!pbool)
167                 return -EINVAL;
168
169         err = strtobool(value, &bool_value);
170         if (err)
171                 return err;
172
173         *pbool = invert ? !bool_value : bool_value;
174         return 0;
175 }
176
177 static int
178 prog_config__inlines(const char *value,
179                      struct perf_probe_event *pev __maybe_unused)
180 {
181         return prog_config__bool(value, &probe_conf.no_inlines, true);
182 }
183
184 static int
185 prog_config__force(const char *value,
186                    struct perf_probe_event *pev __maybe_unused)
187 {
188         return prog_config__bool(value, &probe_conf.force_add, false);
189 }
190
191 static struct {
192         const char *key;
193         const char *usage;
194         const char *desc;
195         int (*func)(const char *, struct perf_probe_event *);
196 } bpf_prog_config_terms[] = {
197         {
198                 .key    = "exec",
199                 .usage  = "exec=<full path of file>",
200                 .desc   = "Set uprobe target",
201                 .func   = prog_config__exec,
202         },
203         {
204                 .key    = "module",
205                 .usage  = "module=<module name>    ",
206                 .desc   = "Set kprobe module",
207                 .func   = prog_config__module,
208         },
209         {
210                 .key    = "inlines",
211                 .usage  = "inlines=[yes|no]        ",
212                 .desc   = "Probe at inline symbol",
213                 .func   = prog_config__inlines,
214         },
215         {
216                 .key    = "force",
217                 .usage  = "force=[yes|no]          ",
218                 .desc   = "Forcibly add events with existing name",
219                 .func   = prog_config__force,
220         },
221 };
222
223 static int
224 do_prog_config(const char *key, const char *value,
225                struct perf_probe_event *pev)
226 {
227         unsigned int i;
228
229         pr_debug("config bpf program: %s=%s\n", key, value);
230         for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
231                 if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
232                         return bpf_prog_config_terms[i].func(value, pev);
233
234         pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
235                  key, value);
236
237         pr_debug("\nHint: Valid options are:\n");
238         for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
239                 pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
240                          bpf_prog_config_terms[i].desc);
241         pr_debug("\n");
242
243         return -BPF_LOADER_ERRNO__PROGCONF_TERM;
244 }
245
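/*
 * Consume leading "key=value;" terms from the section name, applying each
 * one through do_prog_config(). Returns a pointer into config_str just past
 * the consumed terms (the main probe/tracepoint definition), or an ERR_PTR
 * on failure.
 */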
246 static const char *
247 parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
248 {
249         char *text = strdup(config_str);
250         char *sep, *line;
251         const char *main_str = NULL;
252         int err = 0;
253
254         if (!text) {
255                 pr_debug("Not enough memory: dup config_str failed\n");
256                 return ERR_PTR(-ENOMEM);
257         }
258
259         line = text;
260         while ((sep = strchr(line, ';'))) {
261                 char *equ;
262
263                 *sep = '\0';
264                 equ = strchr(line, '=');
265                 if (!equ) {
266                         pr_warning("WARNING: invalid config in BPF object: %s\n",
267                                    line);
268                         pr_warning("\tShould be 'key=value'.\n");
269                         goto nextline;
270                 }
271                 *equ = '\0';
272
273                 err = do_prog_config(line, equ + 1, pev);
274                 if (err)
275                         break;
276 nextline:
277                 line = sep + 1;
278         }
279
280         if (!err)
281                 main_str = config_str + (line - text);
282         free(text);
283
284         return err ? ERR_PTR(err) : main_str;
285 }
286
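/*
 * Split a section name into program config terms and the main definition.
 * A main string without '=' but with ':' is treated as a tracepoint
 * (*is_tp = true); otherwise it is parsed as a perf probe command into pev.
 */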
287 static int
288 parse_prog_config(const char *config_str, const char **p_main_str,
289                   bool *is_tp, struct perf_probe_event *pev)
290 {
291         int err;
292         const char *main_str = parse_prog_config_kvpair(config_str, pev);
293
294         if (IS_ERR(main_str))
295                 return PTR_ERR(main_str);
296
297         *p_main_str = main_str;
298         if (!strchr(main_str, '=')) {
299                 /* Is a tracepoint event? */
300                 const char *s = strchr(main_str, ':');
301
302                 if (!s) {
303                         pr_debug("bpf: '%s' is not a valid tracepoint\n",
304                                  config_str);
305                         return -BPF_LOADER_ERRNO__CONFIG;
306                 }
307
308                 *is_tp = true;
309                 return 0;
310         }
311
312         *is_tp = false;
313         err = parse_perf_probe_command(main_str, pev);
314         if (err < 0) {
315                 pr_debug("bpf: '%s' is not a valid config string\n",
316                          config_str);
317                 /* parse failed, no need to clear pev. */
318                 return -BPF_LOADER_ERRNO__CONFIG;
319         }
320         return 0;
321 }
322
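/*
 * Configure one BPF program from its section name: allocate its private
 * data, parse the config string and attach the result to the program via
 * bpf_program__set_priv().
 */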
323 static int
324 config_bpf_program(struct bpf_program *prog)
325 {
326         struct perf_probe_event *pev = NULL;
327         struct bpf_prog_priv *priv = NULL;
328         const char *config_str, *main_str;
329         bool is_tp = false;
330         int err;
331
332         /* Initialize per-program probing setting */
333         probe_conf.no_inlines = false;
334         probe_conf.force_add = false;
335
336         config_str = bpf_program__title(prog, false);
337         if (IS_ERR(config_str)) {
338                 pr_debug("bpf: unable to get title for program\n");
339                 return PTR_ERR(config_str);
340         }
341
342         priv = calloc(1, sizeof(*priv));
343         if (!priv) {
344                 pr_debug("bpf: failed to alloc priv\n");
345                 return -ENOMEM;
346         }
347         pev = &priv->pev;
348
349         pr_debug("bpf: config program '%s'\n", config_str);
350         err = parse_prog_config(config_str, &main_str, &is_tp, pev);
351         if (err)
352                 goto errout;
353
354         if (is_tp) {
355                 char *s = strchr(main_str, ':');
356
357                 priv->is_tp = true;
358                 priv->sys_name = strndup(main_str, s - main_str);
359                 priv->evt_name = strdup(s + 1);
360                 goto set_priv;
361         }
362
363         if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
364                 pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
365                          config_str, PERF_BPF_PROBE_GROUP);
366                 err = -BPF_LOADER_ERRNO__GROUP;
367                 goto errout;
368         } else if (!pev->group)
369                 pev->group = strdup(PERF_BPF_PROBE_GROUP);
370
371         if (!pev->group) {
372                 pr_debug("bpf: strdup failed\n");
373                 err = -ENOMEM;
374                 goto errout;
375         }
376
377         if (!pev->event) {
378                 pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
379                          config_str);
380                 err = -BPF_LOADER_ERRNO__EVENTNAME;
381                 goto errout;
382         }
383         pr_debug("bpf: config '%s' is ok\n", config_str);
384
385 set_priv:
386         err = bpf_program__set_priv(prog, priv, clear_prog_priv);
387         if (err) {
388                 pr_debug("Failed to set priv for program '%s'\n", config_str);
389                 goto errout;
390         }
391
392         return 0;
393
394 errout:
395         if (pev)
396                 clear_perf_probe_event(pev);
397         free(priv);
398         return err;
399 }
400
401 static int bpf__prepare_probe(void)
402 {
403         static int err = 0;
404         static bool initialized = false;
405
406         /*
407          * Make err static, so that if init failed the first time,
408          * bpf__prepare_probe() fails on each subsequent call without
409          * calling init_probe_symbol_maps() multiple times.
410          */
411         if (initialized)
412                 return err;
413
414         initialized = true;
415         err = init_probe_symbol_maps(false);
416         if (err < 0)
417                 pr_debug("Failed to init_probe_symbol_maps\n");
418         probe_conf.max_probes = MAX_PROBES;
419         return err;
420 }
421
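/*
 * Preprocessor callback registered through bpf_program__set_prep(): for
 * prologue type 'n', find a tev of that type, generate a prologue that
 * fetches its arguments and prepend it to the original instructions.
 */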
422 static int
423 preproc_gen_prologue(struct bpf_program *prog, int n,
424                      struct bpf_insn *orig_insns, int orig_insns_cnt,
425                      struct bpf_prog_prep_result *res)
426 {
427         struct bpf_prog_priv *priv = bpf_program__priv(prog);
428         struct probe_trace_event *tev;
429         struct perf_probe_event *pev;
430         struct bpf_insn *buf;
431         size_t prologue_cnt = 0;
432         int i, err;
433
434         if (IS_ERR(priv) || !priv || priv->is_tp)
435                 goto errout;
436
437         pev = &priv->pev;
438
439         if (n < 0 || n >= priv->nr_types)
440                 goto errout;
441
442         /* Find a tev that belongs to that type */
443         for (i = 0; i < pev->ntevs; i++) {
444                 if (priv->type_mapping[i] == n)
445                         break;
446         }
447
448         if (i >= pev->ntevs) {
449                 pr_debug("Internal error: prologue type %d not found\n", n);
450                 return -BPF_LOADER_ERRNO__PROLOGUE;
451         }
452
453         tev = &pev->tevs[i];
454
455         buf = priv->insns_buf;
456         err = bpf__gen_prologue(tev->args, tev->nargs,
457                                 buf, &prologue_cnt,
458                                 BPF_MAXINSNS - orig_insns_cnt);
459         if (err) {
460                 const char *title;
461
462                 title = bpf_program__title(prog, false);
463                 if (!title)
464                         title = "[unknown]";
465
466                 pr_debug("Failed to generate prologue for program %s\n",
467                          title);
468                 return err;
469         }
470
471         memcpy(&buf[prologue_cnt], orig_insns,
472                sizeof(struct bpf_insn) * orig_insns_cnt);
473
474         res->new_insn_ptr = buf;
475         res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
476         res->pfd = NULL;
477         return 0;
478
479 errout:
480         pr_debug("Internal error in preproc_gen_prologue\n");
481         return -BPF_LOADER_ERRNO__PROLOGUE;
482 }
483
484 /*
485  * compare_tev_args is reflexive, transitive and antisymmetric.
486  * I can prove it, but this margin is too narrow to contain the proof.
487  */
488 static int compare_tev_args(const void *ptev1, const void *ptev2)
489 {
490         int i, ret;
491         const struct probe_trace_event *tev1 =
492                 *(const struct probe_trace_event **)ptev1;
493         const struct probe_trace_event *tev2 =
494                 *(const struct probe_trace_event **)ptev2;
495
496         ret = tev2->nargs - tev1->nargs;
497         if (ret)
498                 return ret;
499
500         for (i = 0; i < tev1->nargs; i++) {
501                 struct probe_trace_arg *arg1, *arg2;
502                 struct probe_trace_arg_ref *ref1, *ref2;
503
504                 arg1 = &tev1->args[i];
505                 arg2 = &tev2->args[i];
506
507                 ret = strcmp(arg1->value, arg2->value);
508                 if (ret)
509                         return ret;
510
511                 ref1 = arg1->ref;
512                 ref2 = arg2->ref;
513
514                 while (ref1 && ref2) {
515                         ret = ref2->offset - ref1->offset;
516                         if (ret)
517                                 return ret;
518
519                         ref1 = ref1->next;
520                         ref2 = ref2->next;
521                 }
522
523                 if (ref1 || ref2)
524                         return ref2 ? 1 : -1;
525         }
526
527         return 0;
528 }
529
530 /*
531  * Assign a type number to each tev in a pev.
532  * mapping is an array with the same number of slots as the tevs in that pev.
533  * nr_types will be set to the number of types.
534  */
535 static int map_prologue(struct perf_probe_event *pev, int *mapping,
536                         int *nr_types)
537 {
538         int i, type = 0;
539         struct probe_trace_event **ptevs;
540
541         size_t array_sz = sizeof(*ptevs) * pev->ntevs;
542
543         ptevs = malloc(array_sz);
544         if (!ptevs) {
545                 pr_debug("Not enough memory: alloc ptevs failed\n");
546                 return -ENOMEM;
547         }
548
549         pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
550         for (i = 0; i < pev->ntevs; i++)
551                 ptevs[i] = &pev->tevs[i];
552
553         qsort(ptevs, pev->ntevs, sizeof(*ptevs),
554               compare_tev_args);
555
556         for (i = 0; i < pev->ntevs; i++) {
557                 int n;
558
559                 n = ptevs[i] - pev->tevs;
560                 if (i == 0) {
561                         mapping[n] = type;
562                         pr_debug("mapping[%d]=%d\n", n, type);
563                         continue;
564                 }
565
566                 if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
567                         mapping[n] = type;
568                 else
569                         mapping[n] = ++type;
570
571                 pr_debug("mapping[%d]=%d\n", n, mapping[n]);
572         }
573         free(ptevs);
574         *nr_types = type + 1;
575
576         return 0;
577 }
578
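/*
 * Decide whether this program needs a prologue (i.e. any of its tevs has
 * arguments). If so, allocate the instruction buffer and the type mapping,
 * then register preproc_gen_prologue() as the program's preprocessor.
 */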
579 static int hook_load_preprocessor(struct bpf_program *prog)
580 {
581         struct bpf_prog_priv *priv = bpf_program__priv(prog);
582         struct perf_probe_event *pev;
583         bool need_prologue = false;
584         int err, i;
585
586         if (IS_ERR(priv) || !priv) {
587                 pr_debug("Internal error when hooking preprocessor\n");
588                 return -BPF_LOADER_ERRNO__INTERNAL;
589         }
590
591         if (priv->is_tp) {
592                 priv->need_prologue = false;
593                 return 0;
594         }
595
596         pev = &priv->pev;
597         for (i = 0; i < pev->ntevs; i++) {
598                 struct probe_trace_event *tev = &pev->tevs[i];
599
600                 if (tev->nargs > 0) {
601                         need_prologue = true;
602                         break;
603                 }
604         }
605
606         /*
607          * Since none of the tevs has arguments, we don't need to
608          * generate a prologue.
609          */
610         if (!need_prologue) {
611                 priv->need_prologue = false;
612                 return 0;
613         }
614
615         priv->need_prologue = true;
616         priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
617         if (!priv->insns_buf) {
618                 pr_debug("Not enough memory: alloc insns_buf failed\n");
619                 return -ENOMEM;
620         }
621
622         priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
623         if (!priv->type_mapping) {
624                 pr_debug("Not enough memory: alloc type_mapping failed\n");
625                 return -ENOMEM;
626         }
627         memset(priv->type_mapping, -1,
628                sizeof(int) * pev->ntevs);
629
630         err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
631         if (err)
632                 return err;
633
634         err = bpf_program__set_prep(prog, priv->nr_types,
635                                     preproc_gen_prologue);
636         return err;
637 }
638
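/*
 * Configure every program in the object from its section name, create the
 * corresponding probe points (kprobes/uprobes) or mark the program as a
 * tracepoint, and hook the prologue preprocessor where needed.
 */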
639 int bpf__probe(struct bpf_object *obj)
640 {
641         int err = 0;
642         struct bpf_program *prog;
643         struct bpf_prog_priv *priv;
644         struct perf_probe_event *pev;
645
646         err = bpf__prepare_probe();
647         if (err) {
648                 pr_debug("bpf__prepare_probe failed\n");
649                 return err;
650         }
651
652         bpf_object__for_each_program(prog, obj) {
653                 err = config_bpf_program(prog);
654                 if (err)
655                         goto out;
656
657                 priv = bpf_program__priv(prog);
658                 if (IS_ERR(priv) || !priv) {
659                         err = PTR_ERR(priv);
660                         goto out;
661                 }
662
663                 if (priv->is_tp) {
664                         bpf_program__set_tracepoint(prog);
665                         continue;
666                 }
667
668                 bpf_program__set_kprobe(prog);
669                 pev = &priv->pev;
670
671                 err = convert_perf_probe_events(pev, 1);
672                 if (err < 0) {
673                         pr_debug("bpf_probe: failed to convert perf probe events\n");
674                         goto out;
675                 }
676
677                 err = apply_perf_probe_events(pev, 1);
678                 if (err < 0) {
679                         pr_debug("bpf_probe: failed to apply perf probe events\n");
680                         goto out;
681                 }
682
683                 /*
684                  * After probing, let's consider the prologue, which
685                  * adds argument fetchers to BPF programs.
686                  *
687                  * hook_load_preprocessor() hooks a pre-processor
688                  * to the bpf_program, letting it generate the prologue
689                  * dynamically during loading.
690                  */
691                 err = hook_load_preprocessor(prog);
692                 if (err)
693                         goto out;
694         }
695 out:
696         return err < 0 ? err : 0;
697 }
698
699 #define EVENTS_WRITE_BUFSIZE  4096
700 int bpf__unprobe(struct bpf_object *obj)
701 {
702         int err, ret = 0;
703         struct bpf_program *prog;
704
705         bpf_object__for_each_program(prog, obj) {
706                 struct bpf_prog_priv *priv = bpf_program__priv(prog);
707                 int i;
708
709                 if (IS_ERR(priv) || !priv || priv->is_tp)
710                         continue;
711
712                 for (i = 0; i < priv->pev.ntevs; i++) {
713                         struct probe_trace_event *tev = &priv->pev.tevs[i];
714                         char name_buf[EVENTS_WRITE_BUFSIZE];
715                         struct strfilter *delfilter;
716
717                         snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
718                                  "%s:%s", tev->group, tev->event);
719                         name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
720
721                         delfilter = strfilter__new(name_buf, NULL);
722                         if (!delfilter) {
723                                 pr_debug("Failed to create filter for unprobing\n");
724                                 ret = -ENOMEM;
725                                 continue;
726                         }
727
728                         err = del_perf_probe_events(delfilter);
729                         strfilter__delete(delfilter);
730                         if (err) {
731                                 pr_debug("Failed to delete %s\n", name_buf);
732                                 ret = err;
733                                 continue;
734                         }
735                 }
736         }
737         return ret;
738 }
739
740 int bpf__load(struct bpf_object *obj)
741 {
742         int err;
743
744         err = bpf_object__load(obj);
745         if (err) {
746                 pr_debug("bpf: load objects failed\n");
747                 return err;
748         }
749         return 0;
750 }
751
752 int bpf__foreach_event(struct bpf_object *obj,
753                        bpf_prog_iter_callback_t func,
754                        void *arg)
755 {
756         struct bpf_program *prog;
757         int err;
758
759         bpf_object__for_each_program(prog, obj) {
760                 struct bpf_prog_priv *priv = bpf_program__priv(prog);
761                 struct probe_trace_event *tev;
762                 struct perf_probe_event *pev;
763                 int i, fd;
764
765                 if (IS_ERR(priv) || !priv) {
766                         pr_debug("bpf: failed to get private field\n");
767                         return -BPF_LOADER_ERRNO__INTERNAL;
768                 }
769
770                 if (priv->is_tp) {
771                         fd = bpf_program__fd(prog);
772                         err = (*func)(priv->sys_name, priv->evt_name, fd, arg);
773                         if (err) {
774                                 pr_debug("bpf: tracepoint callback failed, stop iterating\n");
775                                 return err;
776                         }
777                         continue;
778                 }
779
780                 pev = &priv->pev;
781                 for (i = 0; i < pev->ntevs; i++) {
782                         tev = &pev->tevs[i];
783
784                         if (priv->need_prologue) {
785                                 int type = priv->type_mapping[i];
786
787                                 fd = bpf_program__nth_fd(prog, type);
788                         } else {
789                                 fd = bpf_program__fd(prog);
790                         }
791
792                         if (fd < 0) {
793                                 pr_debug("bpf: failed to get file descriptor\n");
794                                 return fd;
795                         }
796
797                         err = (*func)(tev->group, tev->event, fd, arg);
798                         if (err) {
799                                 pr_debug("bpf: callback failed, stop iterating\n");
800                                 return err;
801                         }
802                 }
803         }
804         return 0;
805 }
806
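/*
 * Map configuration is recorded as a list of operations (bpf_map_op)
 * attached to each map through its private data. Each op sets either a
 * plain value or an evsel's event fd, for all keys or for explicit index
 * ranges, and is applied after the object has been loaded.
 */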
807 enum bpf_map_op_type {
808         BPF_MAP_OP_SET_VALUE,
809         BPF_MAP_OP_SET_EVSEL,
810 };
811
812 enum bpf_map_key_type {
813         BPF_MAP_KEY_ALL,
814         BPF_MAP_KEY_RANGES,
815 };
816
817 struct bpf_map_op {
818         struct list_head list;
819         enum bpf_map_op_type op_type;
820         enum bpf_map_key_type key_type;
821         union {
822                 struct parse_events_array array;
823         } k;
824         union {
825                 u64 value;
826                 struct perf_evsel *evsel;
827         } v;
828 };
829
830 struct bpf_map_priv {
831         struct list_head ops_list;
832 };
833
834 static void
835 bpf_map_op__delete(struct bpf_map_op *op)
836 {
837         if (!list_empty(&op->list))
838                 list_del(&op->list);
839         if (op->key_type == BPF_MAP_KEY_RANGES)
840                 parse_events__clear_array(&op->k.array);
841         free(op);
842 }
843
844 static void
845 bpf_map_priv__purge(struct bpf_map_priv *priv)
846 {
847         struct bpf_map_op *pos, *n;
848
849         list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
850                 list_del_init(&pos->list);
851                 bpf_map_op__delete(pos);
852         }
853 }
854
855 static void
856 bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
857                     void *_priv)
858 {
859         struct bpf_map_priv *priv = _priv;
860
861         bpf_map_priv__purge(priv);
862         free(priv);
863 }
864
865 static int
866 bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
867 {
868         op->key_type = BPF_MAP_KEY_ALL;
869         if (!term)
870                 return 0;
871
872         if (term->array.nr_ranges) {
873                 size_t memsz = term->array.nr_ranges *
874                                 sizeof(op->k.array.ranges[0]);
875
876                 op->k.array.ranges = memdup(term->array.ranges, memsz);
877                 if (!op->k.array.ranges) {
878                         pr_debug("Not enough memory to alloc indices for map\n");
879                         return -ENOMEM;
880                 }
881                 op->key_type = BPF_MAP_KEY_RANGES;
882                 op->k.array.nr_ranges = term->array.nr_ranges;
883         }
884         return 0;
885 }
886
887 static struct bpf_map_op *
888 bpf_map_op__new(struct parse_events_term *term)
889 {
890         struct bpf_map_op *op;
891         int err;
892
893         op = zalloc(sizeof(*op));
894         if (!op) {
895                 pr_debug("Failed to alloc bpf_map_op\n");
896                 return ERR_PTR(-ENOMEM);
897         }
898         INIT_LIST_HEAD(&op->list);
899
900         err = bpf_map_op_setkey(op, term);
901         if (err) {
902                 free(op);
903                 return ERR_PTR(err);
904         }
905         return op;
906 }
907
908 static struct bpf_map_op *
909 bpf_map_op__clone(struct bpf_map_op *op)
910 {
911         struct bpf_map_op *newop;
912
913         newop = memdup(op, sizeof(*op));
914         if (!newop) {
915                 pr_debug("Failed to alloc bpf_map_op\n");
916                 return NULL;
917         }
918
919         INIT_LIST_HEAD(&newop->list);
920         if (op->key_type == BPF_MAP_KEY_RANGES) {
921                 size_t memsz = op->k.array.nr_ranges *
922                                sizeof(op->k.array.ranges[0]);
923
924                 newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
925                 if (!newop->k.array.ranges) {
926                         pr_debug("Failed to alloc indices for map\n");
927                         free(newop);
928                         return NULL;
929                 }
930         }
931
932         return newop;
933 }
934
935 static struct bpf_map_priv *
936 bpf_map_priv__clone(struct bpf_map_priv *priv)
937 {
938         struct bpf_map_priv *newpriv;
939         struct bpf_map_op *pos, *newop;
940
941         newpriv = zalloc(sizeof(*newpriv));
942         if (!newpriv) {
943                 pr_debug("Not enough memory to alloc map private\n");
944                 return NULL;
945         }
946         INIT_LIST_HEAD(&newpriv->ops_list);
947
948         list_for_each_entry(pos, &priv->ops_list, list) {
949                 newop = bpf_map_op__clone(pos);
950                 if (!newop) {
951                         bpf_map_priv__purge(newpriv);
952                         return NULL;
953                 }
954                 list_add_tail(&newop->list, &newpriv->ops_list);
955         }
956
957         return newpriv;
958 }
959
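/*
 * Append an op to the map's private op list, allocating and attaching the
 * private data on first use.
 */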
960 static int
961 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
962 {
963         const char *map_name = bpf_map__name(map);
964         struct bpf_map_priv *priv = bpf_map__priv(map);
965
966         if (IS_ERR(priv)) {
967                 pr_debug("Failed to get private from map %s\n", map_name);
968                 return PTR_ERR(priv);
969         }
970
971         if (!priv) {
972                 priv = zalloc(sizeof(*priv));
973                 if (!priv) {
974                         pr_debug("Not enough memory to alloc map private\n");
975                         return -ENOMEM;
976                 }
977                 INIT_LIST_HEAD(&priv->ops_list);
978
979                 if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
980                         free(priv);
981                         return -BPF_LOADER_ERRNO__INTERNAL;
982                 }
983         }
984
985         list_add_tail(&op->list, &priv->ops_list);
986         return 0;
987 }
988
989 static struct bpf_map_op *
990 bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
991 {
992         struct bpf_map_op *op;
993         int err;
994
995         op = bpf_map_op__new(term);
996         if (IS_ERR(op))
997                 return op;
998
999         err = bpf_map__add_op(map, op);
1000         if (err) {
1001                 bpf_map_op__delete(op);
1002                 return ERR_PTR(err);
1003         }
1004         return op;
1005 }
1006
1007 static int
1008 __bpf_map__config_value(struct bpf_map *map,
1009                         struct parse_events_term *term)
1010 {
1011         struct bpf_map_op *op;
1012         const char *map_name = bpf_map__name(map);
1013         const struct bpf_map_def *def = bpf_map__def(map);
1014
1015         if (IS_ERR(def)) {
1016                 pr_debug("Unable to get map definition from '%s'\n",
1017                          map_name);
1018                 return -BPF_LOADER_ERRNO__INTERNAL;
1019         }
1020
1021         if (def->type != BPF_MAP_TYPE_ARRAY) {
1022                 pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
1023                          map_name);
1024                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1025         }
1026         if (def->key_size < sizeof(unsigned int)) {
1027                 pr_debug("Map %s has incorrect key size\n", map_name);
1028                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
1029         }
1030         switch (def->value_size) {
1031         case 1:
1032         case 2:
1033         case 4:
1034         case 8:
1035                 break;
1036         default:
1037                 pr_debug("Map %s has incorrect value size\n", map_name);
1038                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1039         }
1040
1041         op = bpf_map__add_newop(map, term);
1042         if (IS_ERR(op))
1043                 return PTR_ERR(op);
1044         op->op_type = BPF_MAP_OP_SET_VALUE;
1045         op->v.value = term->val.num;
1046         return 0;
1047 }
1048
1049 static int
1050 bpf_map__config_value(struct bpf_map *map,
1051                       struct parse_events_term *term,
1052                       struct perf_evlist *evlist __maybe_unused)
1053 {
1054         if (!term->err_val) {
1055                 pr_debug("Config value not set\n");
1056                 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1057         }
1058
1059         if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
1060                 pr_debug("ERROR: wrong value type for 'value'\n");
1061                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1062         }
1063
1064         return __bpf_map__config_value(map, term);
1065 }
1066
1067 static int
1068 __bpf_map__config_event(struct bpf_map *map,
1069                         struct parse_events_term *term,
1070                         struct perf_evlist *evlist)
1071 {
1072         struct perf_evsel *evsel;
1073         const struct bpf_map_def *def;
1074         struct bpf_map_op *op;
1075         const char *map_name = bpf_map__name(map);
1076
1077         evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
1078         if (!evsel) {
1079                 pr_debug("Event (for '%s') '%s' doesn't exist\n",
1080                          map_name, term->val.str);
1081                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
1082         }
1083
1084         def = bpf_map__def(map);
1085         if (IS_ERR(def)) {
1086                 pr_debug("Unable to get map definition from '%s'\n",
1087                          map_name);
1088                 return PTR_ERR(def);
1089         }
1090
1091         /*
1092          * No need to check key_size and value_size:
1093          * kernel has already checked them.
1094          */
1095         if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
1096                 pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
1097                          map_name);
1098                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1099         }
1100
1101         op = bpf_map__add_newop(map, term);
1102         if (IS_ERR(op))
1103                 return PTR_ERR(op);
1104         op->op_type = BPF_MAP_OP_SET_EVSEL;
1105         op->v.evsel = evsel;
1106         return 0;
1107 }
1108
1109 static int
1110 bpf_map__config_event(struct bpf_map *map,
1111                       struct parse_events_term *term,
1112                       struct perf_evlist *evlist)
1113 {
1114         if (!term->err_val) {
1115                 pr_debug("Config value not set\n");
1116                 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1117         }
1118
1119         if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
1120                 pr_debug("ERROR: wrong value type for 'event'\n");
1121                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1122         }
1123
1124         return __bpf_map__config_event(map, term, evlist);
1125 }
1126
1127 struct bpf_obj_config__map_func {
1128         const char *config_opt;
1129         int (*config_func)(struct bpf_map *, struct parse_events_term *,
1130                            struct perf_evlist *);
1131 };
1132
1133 struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
1134         {"value", bpf_map__config_value},
1135         {"event", bpf_map__config_event},
1136 };
1137
1138 static int
1139 config_map_indices_range_check(struct parse_events_term *term,
1140                                struct bpf_map *map,
1141                                const char *map_name)
1142 {
1143         struct parse_events_array *array = &term->array;
1144         const struct bpf_map_def *def;
1145         unsigned int i;
1146
1147         if (!array->nr_ranges)
1148                 return 0;
1149         if (!array->ranges) {
1150                 pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
1151                          map_name, (int)array->nr_ranges);
1152                 return -BPF_LOADER_ERRNO__INTERNAL;
1153         }
1154
1155         def = bpf_map__def(map);
1156         if (IS_ERR(def)) {
1157                 pr_debug("ERROR: Unable to get map definition from '%s'\n",
1158                          map_name);
1159                 return -BPF_LOADER_ERRNO__INTERNAL;
1160         }
1161
1162         for (i = 0; i < array->nr_ranges; i++) {
1163                 unsigned int start = array->ranges[i].start;
1164                 size_t length = array->ranges[i].length;
1165                 unsigned int idx = start + length - 1;
1166
1167                 if (idx >= def->max_entries) {
1168                         pr_debug("ERROR: index %d too large\n", idx);
1169                         return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
1170                 }
1171         }
1172         return 0;
1173 }
1174
1175 static int
1176 bpf__obj_config_map(struct bpf_object *obj,
1177                     struct parse_events_term *term,
1178                     struct perf_evlist *evlist,
1179                     int *key_scan_pos)
1180 {
1181         /* key is "map:<mapname>.<config opt>" */
1182         char *map_name = strdup(term->config + sizeof("map:") - 1);
1183         struct bpf_map *map;
1184         int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1185         char *map_opt;
1186         size_t i;
1187
1188         if (!map_name)
1189                 return -ENOMEM;
1190
1191         map_opt = strchr(map_name, '.');
1192         if (!map_opt) {
1193                 pr_debug("ERROR: Invalid map config: %s\n", map_name);
1194                 goto out;
1195         }
1196
1197         *map_opt++ = '\0';
1198         if (*map_opt == '\0') {
1199                 pr_debug("ERROR: Invalid map option: %s\n", term->config);
1200                 goto out;
1201         }
1202
1203         map = bpf_object__find_map_by_name(obj, map_name);
1204         if (!map) {
1205                 pr_debug("ERROR: Map %s doesn't exist\n", map_name);
1206                 err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
1207                 goto out;
1208         }
1209
1210         *key_scan_pos += strlen(map_opt);
1211         err = config_map_indices_range_check(term, map, map_name);
1212         if (err)
1213                 goto out;
1214         *key_scan_pos -= strlen(map_opt);
1215
1216         for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
1217                 struct bpf_obj_config__map_func *func =
1218                                 &bpf_obj_config__map_funcs[i];
1219
1220                 if (strcmp(map_opt, func->config_opt) == 0) {
1221                         err = func->config_func(map, term, evlist);
1222                         goto out;
1223                 }
1224         }
1225
1226         pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
1227         err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
1228 out:
1229         if (!err)
1230                 *key_scan_pos += strlen(map_opt);
1231         free(map_name);
1232         return err;
1233 }
1234
1235 int bpf__config_obj(struct bpf_object *obj,
1236                     struct parse_events_term *term,
1237                     struct perf_evlist *evlist,
1238                     int *error_pos)
1239 {
1240         int key_scan_pos = 0;
1241         int err;
1242
1243         if (!obj || !term || !term->config)
1244                 return -EINVAL;
1245
1246         if (!prefixcmp(term->config, "map:")) {
1247                 key_scan_pos = sizeof("map:") - 1;
1248                 err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
1249                 goto out;
1250         }
1251         err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1252 out:
1253         if (error_pos)
1254                 *error_pos = key_scan_pos;
1255         return err;
1256
1257 }
1258
1259 typedef int (*map_config_func_t)(const char *name, int map_fd,
1260                                  const struct bpf_map_def *pdef,
1261                                  struct bpf_map_op *op,
1262                                  void *pkey, void *arg);
1263
1264 static int
1265 foreach_key_array_all(map_config_func_t func,
1266                       void *arg, const char *name,
1267                       int map_fd, const struct bpf_map_def *pdef,
1268                       struct bpf_map_op *op)
1269 {
1270         unsigned int i;
1271         int err;
1272
1273         for (i = 0; i < pdef->max_entries; i++) {
1274                 err = func(name, map_fd, pdef, op, &i, arg);
1275                 if (err) {
1276                         pr_debug("ERROR: failed to insert value to %s[%u]\n",
1277                                  name, i);
1278                         return err;
1279                 }
1280         }
1281         return 0;
1282 }
1283
1284 static int
1285 foreach_key_array_ranges(map_config_func_t func, void *arg,
1286                          const char *name, int map_fd,
1287                          const struct bpf_map_def *pdef,
1288                          struct bpf_map_op *op)
1289 {
1290         unsigned int i, j;
1291         int err;
1292
1293         for (i = 0; i < op->k.array.nr_ranges; i++) {
1294                 unsigned int start = op->k.array.ranges[i].start;
1295                 size_t length = op->k.array.ranges[i].length;
1296
1297                 for (j = 0; j < length; j++) {
1298                         unsigned int idx = start + j;
1299
1300                         err = func(name, map_fd, pdef, op, &idx, arg);
1301                         if (err) {
1302                                 pr_debug("ERROR: failed to insert value to %s[%u]\n",
1303                                          name, idx);
1304                                 return err;
1305                         }
1306                 }
1307         }
1308         return 0;
1309 }
1310
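/*
 * Walk every op recorded for a map and invoke 'func' once per affected key:
 * either all keys of the array or only the configured index ranges.
 */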
1311 static int
1312 bpf_map_config_foreach_key(struct bpf_map *map,
1313                            map_config_func_t func,
1314                            void *arg)
1315 {
1316         int err, map_fd;
1317         struct bpf_map_op *op;
1318         const struct bpf_map_def *def;
1319         const char *name = bpf_map__name(map);
1320         struct bpf_map_priv *priv = bpf_map__priv(map);
1321
1322         if (IS_ERR(priv)) {
1323                 pr_debug("ERROR: failed to get private from map %s\n", name);
1324                 return -BPF_LOADER_ERRNO__INTERNAL;
1325         }
1326         if (!priv || list_empty(&priv->ops_list)) {
1327                 pr_debug("INFO: nothing to config for map %s\n", name);
1328                 return 0;
1329         }
1330
1331         def = bpf_map__def(map);
1332         if (IS_ERR(def)) {
1333                 pr_debug("ERROR: failed to get definition from map %s\n", name);
1334                 return -BPF_LOADER_ERRNO__INTERNAL;
1335         }
1336         map_fd = bpf_map__fd(map);
1337         if (map_fd < 0) {
1338                 pr_debug("ERROR: failed to get fd from map %s\n", name);
1339                 return map_fd;
1340         }
1341
1342         list_for_each_entry(op, &priv->ops_list, list) {
1343                 switch (def->type) {
1344                 case BPF_MAP_TYPE_ARRAY:
1345                 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1346                         switch (op->key_type) {
1347                         case BPF_MAP_KEY_ALL:
1348                                 err = foreach_key_array_all(func, arg, name,
1349                                                             map_fd, def, op);
1350                                 break;
1351                         case BPF_MAP_KEY_RANGES:
1352                                 err = foreach_key_array_ranges(func, arg, name,
1353                                                                map_fd, def,
1354                                                                op);
1355                                 break;
1356                         default:
1357                                 pr_debug("ERROR: keytype for map '%s' invalid\n",
1358                                          name);
1359                                 return -BPF_LOADER_ERRNO__INTERNAL;
1360                         }
1361                         if (err)
1362                                 return err;
1363                         break;
1364                 default:
1365                         pr_debug("ERROR: type of '%s' incorrect\n", name);
1366                         return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1367                 }
1368         }
1369
1370         return 0;
1371 }
1372
1373 static int
1374 apply_config_value_for_key(int map_fd, void *pkey,
1375                            size_t val_size, u64 val)
1376 {
1377         int err = 0;
1378
1379         switch (val_size) {
1380         case 1: {
1381                 u8 _val = (u8)(val);
1382                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1383                 break;
1384         }
1385         case 2: {
1386                 u16 _val = (u16)(val);
1387                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1388                 break;
1389         }
1390         case 4: {
1391                 u32 _val = (u32)(val);
1392                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1393                 break;
1394         }
1395         case 8: {
1396                 err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
1397                 break;
1398         }
1399         default:
1400                 pr_debug("ERROR: invalid value size\n");
1401                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1402         }
1403         if (err && errno)
1404                 err = -errno;
1405         return err;
1406 }
1407
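/*
 * Store the perf event fd of 'evsel' into map[*pkey]. The event must have a
 * one-dimensional fd array, must not be an inherit event, and must be a
 * bpf-output, raw or hardware event.
 */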
1408 static int
1409 apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
1410                            struct perf_evsel *evsel)
1411 {
1412         struct xyarray *xy = evsel->fd;
1413         struct perf_event_attr *attr;
1414         unsigned int key, events;
1415         bool check_pass = false;
1416         int *evt_fd;
1417         int err;
1418
1419         if (!xy) {
1420                 pr_debug("ERROR: evsel not ready for map %s\n", name);
1421                 return -BPF_LOADER_ERRNO__INTERNAL;
1422         }
1423
1424         if (xy->row_size / xy->entry_size != 1) {
1425                 pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
1426                          name);
1427                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
1428         }
1429
1430         attr = &evsel->attr;
1431         if (attr->inherit) {
1432                 pr_debug("ERROR: Can't put inherit event into map %s\n", name);
1433                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
1434         }
1435
1436         if (perf_evsel__is_bpf_output(evsel))
1437                 check_pass = true;
1438         if (attr->type == PERF_TYPE_RAW)
1439                 check_pass = true;
1440         if (attr->type == PERF_TYPE_HARDWARE)
1441                 check_pass = true;
1442         if (!check_pass) {
1443                 pr_debug("ERROR: Event type is wrong for map %s\n", name);
1444                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
1445         }
1446
1447         events = xy->entries / (xy->row_size / xy->entry_size);
1448         key = *((unsigned int *)pkey);
1449         if (key >= events) {
1450                 pr_debug("ERROR: there is no event %d for map %s\n",
1451                          key, name);
1452                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
1453         }
1454         evt_fd = xyarray__entry(xy, key, 0);
1455         err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
1456         if (err && errno)
1457                 err = -errno;
1458         return err;
1459 }
1460
1461 static int
1462 apply_obj_config_map_for_key(const char *name, int map_fd,
1463                              const struct bpf_map_def *pdef,
1464                              struct bpf_map_op *op,
1465                              void *pkey, void *arg __maybe_unused)
1466 {
1467         int err;
1468
1469         switch (op->op_type) {
1470         case BPF_MAP_OP_SET_VALUE:
1471                 err = apply_config_value_for_key(map_fd, pkey,
1472                                                  pdef->value_size,
1473                                                  op->v.value);
1474                 break;
1475         case BPF_MAP_OP_SET_EVSEL:
1476                 err = apply_config_evsel_for_key(name, map_fd, pkey,
1477                                                  op->v.evsel);
1478                 break;
1479         default:
1480                 pr_debug("ERROR: unknown value type for '%s'\n", name);
1481                 err = -BPF_LOADER_ERRNO__INTERNAL;
1482         }
1483         return err;
1484 }
1485
1486 static int
1487 apply_obj_config_map(struct bpf_map *map)
1488 {
1489         return bpf_map_config_foreach_key(map,
1490                                           apply_obj_config_map_for_key,
1491                                           NULL);
1492 }
1493
1494 static int
1495 apply_obj_config_object(struct bpf_object *obj)
1496 {
1497         struct bpf_map *map;
1498         int err;
1499
1500         bpf_map__for_each(map, obj) {
1501                 err = apply_obj_config_map(map);
1502                 if (err)
1503                         return err;
1504         }
1505         return 0;
1506 }
1507
1508 int bpf__apply_obj_config(void)
1509 {
1510         struct bpf_object *obj, *tmp;
1511         int err;
1512
1513         bpf_object__for_each_safe(obj, tmp) {
1514                 err = apply_obj_config_object(obj);
1515                 if (err)
1516                         return err;
1517         }
1518
1519         return 0;
1520 }
1521
1522 #define bpf__for_each_map(pos, obj, objtmp)     \
1523         bpf_object__for_each_safe(obj, objtmp)  \
1524                 bpf_map__for_each(pos, obj)
1525
1526 #define bpf__for_each_stdout_map(pos, obj, objtmp)      \
1527         bpf__for_each_map(pos, obj, objtmp)             \
1528                 if (bpf_map__name(pos) &&               \
1529                         (strcmp("__bpf_stdout__",       \
1530                                 bpf_map__name(pos)) == 0))
1531
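/*
 * Make sure every "__bpf_stdout__" map has an output event attached: reuse
 * the settings of an already-configured map if one exists, otherwise create
 * a new bpf-output evsel and point the remaining maps at it.
 */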
1532 int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
1533 {
1534         struct bpf_map_priv *tmpl_priv = NULL;
1535         struct bpf_object *obj, *tmp;
1536         struct perf_evsel *evsel = NULL;
1537         struct bpf_map *map;
1538         int err;
1539         bool need_init = false;
1540
1541         bpf__for_each_stdout_map(map, obj, tmp) {
1542                 struct bpf_map_priv *priv = bpf_map__priv(map);
1543
1544                 if (IS_ERR(priv))
1545                         return -BPF_LOADER_ERRNO__INTERNAL;
1546
1547                 /*
1548                  * No need to check map type: type should have been
1549                  * verified by kernel.
1550                  */
1551                 if (!need_init && !priv)
1552                         need_init = !priv;
1553                 if (!tmpl_priv && priv)
1554                         tmpl_priv = priv;
1555         }
1556
1557         if (!need_init)
1558                 return 0;
1559
1560         if (!tmpl_priv) {
1561                 err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
1562                                    NULL);
1563                 if (err) {
1564                         pr_debug("ERROR: failed to create bpf-output event\n");
1565                         return -err;
1566                 }
1567
1568                 evsel = perf_evlist__last(evlist);
1569         }
1570
1571         bpf__for_each_stdout_map(map, obj, tmp) {
1572                 struct bpf_map_priv *priv = bpf_map__priv(map);
1573
1574                 if (IS_ERR(priv))
1575                         return -BPF_LOADER_ERRNO__INTERNAL;
1576                 if (priv)
1577                         continue;
1578
1579                 if (tmpl_priv) {
1580                         priv = bpf_map_priv__clone(tmpl_priv);
1581                         if (!priv)
1582                                 return -ENOMEM;
1583
1584                         err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
1585                         if (err) {
1586                                 bpf_map_priv__clear(map, priv);
1587                                 return err;
1588                         }
1589                 } else if (evsel) {
1590                         struct bpf_map_op *op;
1591
1592                         op = bpf_map__add_newop(map, NULL);
1593                         if (IS_ERR(op))
1594                                 return PTR_ERR(op);
1595                         op->op_type = BPF_MAP_OP_SET_EVSEL;
1596                         op->v.evsel = evsel;
1597                 }
1598         }
1599
1600         return 0;
1601 }
1602
1603 #define ERRNO_OFFSET(e)         ((e) - __BPF_LOADER_ERRNO__START)
1604 #define ERRCODE_OFFSET(c)       ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
1605 #define NR_ERRNO        (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
1606
1607 static const char *bpf_loader_strerror_table[NR_ERRNO] = {
1608         [ERRCODE_OFFSET(CONFIG)]        = "Invalid config string",
1609         [ERRCODE_OFFSET(GROUP)]         = "Invalid group name",
1610         [ERRCODE_OFFSET(EVENTNAME)]     = "No event name found in config string",
1611         [ERRCODE_OFFSET(INTERNAL)]      = "BPF loader internal error",
1612         [ERRCODE_OFFSET(COMPILE)]       = "Error when compiling BPF scriptlet",
1613         [ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
1614         [ERRCODE_OFFSET(PROLOGUE)]      = "Failed to generate prologue",
1615         [ERRCODE_OFFSET(PROLOGUE2BIG)]  = "Prologue too big for program",
1616         [ERRCODE_OFFSET(PROLOGUEOOB)]   = "Offset out of bounds for prologue",
1617         [ERRCODE_OFFSET(OBJCONF_OPT)]   = "Invalid object config option",
1618         [ERRCODE_OFFSET(OBJCONF_CONF)]  = "Config value not set (missing '=')",
1619         [ERRCODE_OFFSET(OBJCONF_MAP_OPT)]       = "Invalid object map config option",
1620         [ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]  = "Target map doesn't exist",
1621         [ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]     = "Incorrect value type for map",
1622         [ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]      = "Incorrect map type",
1623         [ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]   = "Incorrect map key size",
1624         [ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
1625         [ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]     = "Event not found for map setting",
1626         [ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]   = "Invalid map size for event setting",
1627         [ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]    = "Event dimension too large",
1628         [ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]    = "Inherit event is not supported",
1629         [ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]   = "Wrong event type for map",
1630         [ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]   = "Index too large",
1631 };
1632
1633 static int
1634 bpf_loader_strerror(int err, char *buf, size_t size)
1635 {
1636         char sbuf[STRERR_BUFSIZE];
1637         const char *msg;
1638
1639         if (!buf || !size)
1640                 return -1;
1641
1642         err = err > 0 ? err : -err;
1643
1644         if (err >= __LIBBPF_ERRNO__START)
1645                 return libbpf_strerror(err, buf, size);
1646
1647         if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
1648                 msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
1649                 snprintf(buf, size, "%s", msg);
1650                 buf[size - 1] = '\0';
1651                 return 0;
1652         }
1653
1654         if (err >= __BPF_LOADER_ERRNO__END)
1655                 snprintf(buf, size, "Unknown bpf loader error %d", err);
1656         else
1657                 snprintf(buf, size, "%s",
1658                          str_error_r(err, sbuf, sizeof(sbuf)));
1659
1660         buf[size - 1] = '\0';
1661         return -1;
1662 }
1663
1664 #define bpf__strerror_head(err, buf, size) \
1665         char sbuf[STRERR_BUFSIZE], *emsg;\
1666         if (!size)\
1667                 return 0;\
1668         if (err < 0)\
1669                 err = -err;\
1670         bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
1671         emsg = sbuf;\
1672         switch (err) {\
1673         default:\
1674                 scnprintf(buf, size, "%s", emsg);\
1675                 break;
1676
1677 #define bpf__strerror_entry(val, fmt...)\
1678         case val: {\
1679                 scnprintf(buf, size, fmt);\
1680                 break;\
1681         }
1682
1683 #define bpf__strerror_end(buf, size)\
1684         }\
1685         buf[size - 1] = '\0';
1686
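The three macros above are meant to be used as a set: bpf__strerror_head() opens a switch whose default case copies the generic message produced by bpf_loader_strerror(), each bpf__strerror_entry() contributes one case, and bpf__strerror_end() closes the switch and NUL-terminates the buffer. As a mechanical illustration (the EEXIST entry is only a placeholder), a handler body written as

	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(EEXIST, "Already exists");
	bpf__strerror_end(buf, size);

expands to roughly:

	char sbuf[STRERR_BUFSIZE], *emsg;
	if (!size)
		return 0;
	if (err < 0)
		err = -err;
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));
	emsg = sbuf;
	switch (err) {
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	case EEXIST: {
		scnprintf(buf, size, "Already exists");
		break;
	}
	}
	buf[size - 1] = '\0';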
1687 int bpf__strerror_prepare_load(const char *filename, bool source,
1688                                int err, char *buf, size_t size)
1689 {
1690         size_t n;
1691         int ret;
1692
1693         n = snprintf(buf, size, "Failed to load %s%s: ",
1694                          filename, source ? " from source" : "");
1695         if (n >= size) {
1696                 buf[size - 1] = '\0';
1697                 return 0;
1698         }
1699         buf += n;
1700         size -= n;
1701
1702         ret = bpf_loader_strerror(err, buf, size);
1703         buf[size - 1] = '\0';
1704         return ret;
1705 }
1706
1707 int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
1708                         int err, char *buf, size_t size)
1709 {
1710         bpf__strerror_head(err, buf, size);
1711         case BPF_LOADER_ERRNO__PROGCONF_TERM: {
1712                 scnprintf(buf, size, "%s (add -v to see details)", emsg);
1713                 break;
1714         }
1715         bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
1716         bpf__strerror_entry(EACCES, "You need to be root");
1717         bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
1718         bpf__strerror_entry(ENOENT, "You need to check the probing points in the BPF file");
1719         bpf__strerror_end(buf, size);
1720         return 0;
1721 }
1722
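Callers in perf are expected to feed the raw return value of the failing bpf__*() call into the matching strerror helper and print the result. A minimal sketch of that pattern, assuming a call site that has already opened 'obj' (only bpf__probe() and bpf__strerror_probe() are taken from this file's API; the surrounding handling is illustrative):

	char errbuf[BUFSIZ];
	int err;

	err = bpf__probe(obj);
	if (err) {
		/* Translate the error into a user-facing message. */
		bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
		pr_err("Failed to set up BPF probe points: %s\n", errbuf);
		return err;
	}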
1723 int bpf__strerror_load(struct bpf_object *obj,
1724                        int err, char *buf, size_t size)
1725 {
1726         bpf__strerror_head(err, buf, size);
1727         case LIBBPF_ERRNO__KVER: {
1728                 unsigned int obj_kver = bpf_object__kversion(obj);
1729                 unsigned int real_kver;
1730
1731                 if (fetch_kernel_version(&real_kver, NULL, 0)) {
1732                         scnprintf(buf, size, "Unable to fetch kernel version");
1733                         break;
1734                 }
1735
1736                 if (obj_kver != real_kver) {
1737                         scnprintf(buf, size,
1738                                   "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
1739                                   KVER_PARAM(obj_kver),
1740                                   KVER_PARAM(real_kver));
1741                         break;
1742                 }
1743
1744                 scnprintf(buf, size, "Failed to load program for unknown reason");
1745                 break;
1746         }
1747         bpf__strerror_end(buf, size);
1748         return 0;
1749 }
1750
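The LIBBPF_ERRNO__KVER case above reports a mismatch between the 'version' section recorded in the BPF object and the running kernel. A scriptlet normally pins that section to the headers it was compiled against; a minimal sketch (the SEC() macro is an assumption here, LINUX_VERSION_CODE comes from <linux/version.h>):

	#include <linux/types.h>
	#include <linux/version.h>

	#define SEC(name) __attribute__((section(name), used))

	/* Recorded in the object; compared against the running kernel at load time. */
	__u32 _version SEC("version") = LINUX_VERSION_CODE;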
1751 int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
1752                              struct parse_events_term *term __maybe_unused,
1753                              struct perf_evlist *evlist __maybe_unused,
1754                              int *error_pos __maybe_unused, int err,
1755                              char *buf, size_t size)
1756 {
1757         bpf__strerror_head(err, buf, size);
1758         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
1759                             "Can't use this config term with this map type");
1760         bpf__strerror_end(buf, size);
1761         return 0;
1762 }
1763
1764 int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
1765 {
1766         bpf__strerror_head(err, buf, size);
1767         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
1768                             "Cannot set event to BPF map in multi-thread tracing");
1769         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
1770                             "%s (Hint: use -i to turn off inherit)", emsg);
1771         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
1772                             "Can only put raw, hardware, and BPF output events into a BPF map");
1773         bpf__strerror_end(buf, size);
1774         return 0;
1775 }
1776
1777 int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
1778                                int err, char *buf, size_t size)
1779 {
1780         bpf__strerror_head(err, buf, size);
1781         bpf__strerror_end(buf, size);
1782         return 0;
1783 }