4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
5 BPF_MOV64_IMM(BPF_REG_0, 1),
7 BPF_MOV64_IMM(BPF_REG_0, 2),
10 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* bpf-to-bpf calls are root-only; unprivileged load must be rejected with the
 * .errstr_unpriv below. NOTE(review): "unpriviledged" is a typo, but it is part
 * of the test-name string and must stay byte-identical.
 */
14 "calls: not on unpriviledged",
16 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
17 BPF_MOV64_IMM(BPF_REG_0, 1),
19 BPF_MOV64_IMM(BPF_REG_0, 2),
22 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
23 .result_unpriv = REJECT,
/* Caller does a packet bounds check, then the callee performs a 32-bit BPF_DIV
 * with a zero divisor (r3 / r2 where r2 = 0). The expected .result field is not
 * visible in this fragment — TODO confirm against the full file.
 */
28 "calls: div by 0 in subprog",
30 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
31 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
32 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
33 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
34 offsetof(struct __sk_buff, data_end)),
35 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
36 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
37 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
38 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
39 BPF_MOV64_IMM(BPF_REG_0, 1),
41 BPF_MOV32_IMM(BPF_REG_2, 0),
42 BPF_MOV32_IMM(BPF_REG_3, 1),
43 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
44 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
45 offsetof(struct __sk_buff, data)),
48 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Callee can exit with r0 holding either a packet-derived pointer value or a
 * plain scalar depending on the branch; dereferencing r0 in the caller is then
 * rejected — see .errstr "R0 invalid mem access 'inv'".
 */
53 "calls: multiple ret types in subprog 1",
55 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
56 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
58 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
59 offsetof(struct __sk_buff, data_end)),
60 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
61 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
62 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
63 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
64 BPF_MOV64_IMM(BPF_REG_0, 1),
66 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
67 offsetof(struct __sk_buff, data)),
68 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
69 BPF_MOV32_IMM(BPF_REG_0, 42),
72 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
74 .errstr = "R0 invalid mem access 'inv'",
/* Variant of the previous test: the innermost callee may return either skb
 * data or a map value from map_lookup_elem; the later +64 arithmetic makes the
 * map-value access out of bounds — see .errstr below.
 */
77 "calls: multiple ret types in subprog 2",
79 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
80 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
81 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
82 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
83 offsetof(struct __sk_buff, data_end)),
84 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
85 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
86 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
87 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
88 BPF_MOV64_IMM(BPF_REG_0, 1),
90 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
91 offsetof(struct __sk_buff, data)),
92 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
93 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
94 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
95 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
96 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
97 BPF_LD_MAP_FD(BPF_REG_1, 0),
98 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
99 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
100 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
101 offsetof(struct __sk_buff, data)),
102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
105 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
106 .fixup_map_hash_8b = { 16 },
108 .errstr = "R0 min value is outside of the array range",
/* A pseudo-call with offset 0 targets the instruction right after itself, so
 * caller and callee bodies overlap; the verifier complains that the function's
 * last insn is not an exit or jmp.
 */
111 "calls: overlapping caller/callee",
113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
114 BPF_MOV64_IMM(BPF_REG_0, 1),
117 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
118 .errstr = "last insn is not an exit or jmp",
/* Backward pseudo-calls (offset -2) reach into the middle of another function
 * body; rejected as "jump out of range".
 */
122 "calls: wrong recursive calls",
124 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
125 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
128 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
129 BPF_MOV64_IMM(BPF_REG_0, 1),
132 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
133 .errstr = "jump out of range",
/* src_reg = 2 is neither 0 (helper call) nor BPF_PSEUDO_CALL (1); the encoding
 * check rejects it as reserved-field misuse.
 */
137 "calls: wrong src reg",
139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
140 BPF_MOV64_IMM(BPF_REG_0, 1),
143 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
144 .errstr = "BPF_CALL uses reserved fields",
/* A pseudo-call must carry off = 0; off = -1 here trips the same
 * reserved-fields check.
 */
148 "calls: wrong off value",
150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
151 BPF_MOV64_IMM(BPF_REG_0, 1),
153 BPF_MOV64_IMM(BPF_REG_0, 2),
156 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
157 .errstr = "BPF_CALL uses reserved fields",
/* Pseudo-call with imm = -1 targets itself (insn 0 -> 0): detected as a
 * back-edge / self-loop.
 */
161 "calls: jump back loop",
163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
164 BPF_MOV64_IMM(BPF_REG_0, 1),
167 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
168 .errstr = "back-edge from insn 0 to 0",
/* Conditional branch jumps over the call into what becomes another function's
 * body — "jump out of range".
 */
172 "calls: conditional call",
174 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
175 offsetof(struct __sk_buff, mark)),
176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
177 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
178 BPF_MOV64_IMM(BPF_REG_0, 1),
180 BPF_MOV64_IMM(BPF_REG_0, 2),
183 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
184 .errstr = "jump out of range",
/* Same shape but the branch target stays inside the caller; no .errstr is
 * visible in this fragment (presumably ACCEPT — TODO confirm in full file).
 */
188 "calls: conditional call 2",
190 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
191 offsetof(struct __sk_buff, mark)),
192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
193 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
194 BPF_MOV64_IMM(BPF_REG_0, 1),
196 BPF_MOV64_IMM(BPF_REG_0, 2),
198 BPF_MOV64_IMM(BPF_REG_0, 3),
201 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Plain JA jumps (no pseudo-call) forming backward edges across the control
 * flow; rejected as a back-edge.
 */
205 "calls: conditional call 3",
207 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
208 offsetof(struct __sk_buff, mark)),
209 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
210 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
211 BPF_MOV64_IMM(BPF_REG_0, 1),
213 BPF_MOV64_IMM(BPF_REG_0, 1),
214 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
215 BPF_MOV64_IMM(BPF_REG_0, 3),
216 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
218 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
219 .errstr = "back-edge from insn",
/* Backward JA of -5 lands after the call insn, staying within the same
 * function; no .errstr visible in this fragment (presumably ACCEPT — TODO
 * confirm in full file).
 */
223 "calls: conditional call 4",
225 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
226 offsetof(struct __sk_buff, mark)),
227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
228 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
229 BPF_MOV64_IMM(BPF_REG_0, 1),
231 BPF_MOV64_IMM(BPF_REG_0, 1),
232 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
233 BPF_MOV64_IMM(BPF_REG_0, 3),
236 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Same as 4 but the backward JA of -6 reaches back before the call, creating a
 * loop — rejected as a back-edge.
 */
240 "calls: conditional call 5",
242 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
243 offsetof(struct __sk_buff, mark)),
244 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
245 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
246 BPF_MOV64_IMM(BPF_REG_0, 1),
248 BPF_MOV64_IMM(BPF_REG_0, 1),
249 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
250 BPF_MOV64_IMM(BPF_REG_0, 3),
253 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
254 .errstr = "back-edge from insn",
/* Conditional branch back to the call instruction itself (offset -2) forms a
 * loop — rejected as a back-edge.
 */
258 "calls: conditional call 6",
260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
263 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
264 offsetof(struct __sk_buff, mark)),
267 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
268 .errstr = "back-edge from insn",
/* Callee sets r0 = 2 before returning; caller may use it. No .errstr visible
 * in this fragment (presumably ACCEPT — TODO confirm in full file).
 */
272 "calls: using r0 returned by callee",
274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
276 BPF_MOV64_IMM(BPF_REG_0, 2),
279 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Callee never writes r0; reading it after the call is rejected (!read_ok). */
283 "calls: using uninit r0 from callee",
285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
289 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
290 .errstr = "!read_ok",
/* r1 (ctx) is passed through to the callee, which loads skb->len from it;
 * expected runtime return value is TEST_DATA_LEN.
 */
294 "calls: callee is using r1",
296 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
298 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
299 offsetof(struct __sk_buff, len)),
302 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
304 .retval = TEST_DATA_LEN,
/* Callee returns its first argument (a ctx pointer) as r0; rejected for
 * unprivileged users, and the privileged retval leaks POINTER_VALUE.
 */
307 "calls: callee using args1",
309 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
311 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
314 .errstr_unpriv = "allowed for root only",
315 .result_unpriv = REJECT,
317 .retval = POINTER_VALUE,
/* Caller never initialized r2, so the callee's read of it fails (R2 !read_ok). */
320 "calls: callee using wrong args2",
322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
324 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
327 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
328 .errstr = "R2 !read_ok",
/* Caller loads skb->len into r1 and r2; callee sums them, giving
 * 2*TEST_DATA_LEN minus the two ETH_HLEN adjustments per the .retval.
 */
332 "calls: callee using two args",
334 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
335 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
336 offsetof(struct __sk_buff, len)),
337 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
338 offsetof(struct __sk_buff, len)),
339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
341 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
342 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
345 .errstr_unpriv = "allowed for root only",
346 .result_unpriv = REJECT,
348 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
/* Callee invokes bpf_xdp_adjust_head, which invalidates previously-verified
 * packet pointers in the caller; the post-call load through r6 must be flagged
 * ("R6 invalid mem access 'inv'").
 */
351 "calls: callee changing pkt pointers",
353 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
354 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
355 offsetof(struct xdp_md, data_end)),
356 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
358 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
359 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
360 /* clear_all_pkt_pointers() has to walk all frames
361 * to make sure that pkt pointers in the caller
362 * are cleared when callee is calling a helper that
363 * adjusts packet size
 */
365 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
366 BPF_MOV32_IMM(BPF_REG_0, 0),
368 BPF_MOV64_IMM(BPF_REG_2, 0),
369 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
373 .errstr = "R6 invalid mem access 'inv'",
374 .prog_type = BPF_PROG_TYPE_XDP,
375 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Three-function chain: results of the two callees are summed and returned;
 * expected runtime return value is 2*TEST_DATA_LEN.
 */
378 "calls: two calls with args",
380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
382 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
384 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
385 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
386 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
387 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
388 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
390 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
391 offsetof(struct __sk_buff, len)),
394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
396 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
/* Each frame derives fp-64 and passes it down; final store goes through the
 * chain of stack pointers. No .errstr visible in this fragment (presumably
 * ACCEPT — TODO confirm in full file).
 */
399 "calls: calls with stack arith",
401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
409 BPF_MOV64_IMM(BPF_REG_0, 42),
410 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
413 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Like the stack-arith test but with odd offsets (-63, -61); with strict
 * alignment enforced the DW store is rejected as misaligned.
 */
418 "calls: calls with misaligned stack access",
420 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
422 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
427 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
428 BPF_MOV64_IMM(BPF_REG_0, 42),
429 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
432 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
433 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
434 .errstr = "misaligned stack access",
/* Pure JA control flow (forward + backward -3) within one function; no
 * .errstr visible in this fragment (presumably ACCEPT — TODO confirm).
 */
438 "calls: calls control flow, jump test",
440 BPF_MOV64_IMM(BPF_REG_0, 42),
441 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
442 BPF_MOV64_IMM(BPF_REG_0, 43),
443 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
444 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
447 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Same layout with the last JA replaced by a backward pseudo-call; rejected:
 * "jump out of range from insn 1 to 4".
 */
452 "calls: calls control flow, jump test 2",
454 BPF_MOV64_IMM(BPF_REG_0, 42),
455 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
456 BPF_MOV64_IMM(BPF_REG_0, 43),
457 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
461 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
462 .errstr = "jump out of range from insn 1 to 4",
/* The trailing conditional branch at insn 11 jumps backward across a function
 * boundary (to insn 9) — "jump out of range".
 */
466 "calls: two calls with bad jump",
468 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
470 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
471 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
472 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
473 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
475 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
476 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
478 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
479 offsetof(struct __sk_buff, len)),
480 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
483 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
484 .errstr = "jump out of range from insn 11 to 9",
/* Callee calls itself (imm = -1); direct recursion is rejected as a
 * back-edge.
 */
488 "calls: recursive call. test1",
490 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
492 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
495 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
496 .errstr = "back-edge",
/* Mutual/indirect recursion via a backward pseudo-call (imm = -3); rejected as
 * a back-edge.
 */
500 "calls: recursive call. test2",
502 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
504 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
507 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
508 .errstr = "back-edge",
/* One instruction is never reachable from any function; rejected as
 * "unreachable insn 6".
 */
512 "calls: unreachable code",
514 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
516 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
518 BPF_MOV64_IMM(BPF_REG_0, 0),
520 BPF_MOV64_IMM(BPF_REG_0, 0),
523 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
524 .errstr = "unreachable insn 6",
/* Backward call target (imm = -4) lands before the program start — "invalid
 * destination".
 */
528 "calls: invalid call",
530 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
532 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
535 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
536 .errstr = "invalid destination",
/* Call target imm = 0x7fffffff is far past the end of the program — "invalid
 * destination".
 */
540 "calls: invalid call 2",
542 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
547 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
548 .errstr = "invalid destination",
/* A conditional branch inside one function jumps backward into a different
 * function's body — "jump out of range".
 */
552 "calls: jumping across function bodies. test1",
554 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
555 BPF_MOV64_IMM(BPF_REG_0, 0),
557 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
560 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
561 .errstr = "jump out of range",
/* Forward variant of test1: the branch jumps forward across the function
 * boundary — "jump out of range".
 */
565 "calls: jumping across function bodies. test2",
567 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
569 BPF_MOV64_IMM(BPF_REG_0, 0),
573 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
574 .errstr = "jump out of range",
/* The last function falls off the end via a branch instead of BPF_EXIT —
 * "not an exit".
 */
578 "calls: call without exit",
580 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
582 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
584 BPF_MOV64_IMM(BPF_REG_0, 0),
585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
587 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
588 .errstr = "not an exit",
/* Call target lands on the second half of a 16-byte BPF_LD_IMM64 — rejected
 * via the "last insn" check.
 */
592 "calls: call into middle of ld_imm64",
594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
595 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
596 BPF_MOV64_IMM(BPF_REG_0, 0),
598 BPF_LD_IMM64(BPF_REG_0, 0),
601 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
602 .errstr = "last insn",
/* Same idea with the call landing inside another call sequence — rejected via
 * the "last insn" check.
 */
606 "calls: call into middle of other call",
608 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
609 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
610 BPF_MOV64_IMM(BPF_REG_0, 0),
612 BPF_MOV64_IMM(BPF_REG_0, 0),
613 BPF_MOV64_IMM(BPF_REG_0, 0),
616 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
617 .errstr = "last insn",
/* LD_ABS in the caller plus a callee that calls bpf_skb_vlan_push (which
 * changes ctx data) is forbidden — see .errstr about mixing LD_[ABS|IND].
 */
621 "calls: ld_abs with changing ctx data in callee",
623 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
624 BPF_LD_ABS(BPF_B, 0),
625 BPF_LD_ABS(BPF_H, 0),
626 BPF_LD_ABS(BPF_W, 0),
627 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
628 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
629 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
630 BPF_LD_ABS(BPF_B, 0),
631 BPF_LD_ABS(BPF_H, 0),
632 BPF_LD_ABS(BPF_W, 0),
634 BPF_MOV64_IMM(BPF_REG_2, 1),
635 BPF_MOV64_IMM(BPF_REG_3, 2),
636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
639 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
640 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
/* Middle function falls through into the next function body instead of
 * exiting — "not an exit".
 */
644 "calls: two calls with bad fallthrough",
646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
648 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
650 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
651 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
652 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
653 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
654 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
655 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
656 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
657 offsetof(struct __sk_buff, len)),
660 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
661 .errstr = "not an exit",
/* Caller passes fp-8 into the chain; the innermost function reads through the
 * stack pointer. No .errstr visible in this fragment (presumably ACCEPT —
 * TODO confirm in full file).
 */
665 "calls: two calls with stack read",
667 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
668 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
670 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
672 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
673 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
674 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
675 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
676 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
677 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
678 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
680 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
683 .prog_type = BPF_PROG_TYPE_XDP,
/* Callee writes into the main prog's stack frame through a passed fp pointer,
 * then the caller reads it back. No .errstr visible in this fragment
 * (presumably ACCEPT — TODO confirm in full file).
 */
687 "calls: two calls with stack write",
690 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
692 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
693 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
695 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
696 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
700 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
701 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
703 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
704 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
705 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
706 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
708 /* write into stack frame of main prog */
709 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
713 /* read from stack frame of main prog */
714 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
717 .prog_type = BPF_PROG_TYPE_XDP,
/* Both frames use ~300 bytes before the call; the combined depth exceeds the
 * limit — "combined stack size".
 */
721 "calls: stack overflow using two frames (pre-call access)",
724 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
725 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
729 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
730 BPF_MOV64_IMM(BPF_REG_0, 0),
733 .prog_type = BPF_PROG_TYPE_XDP,
734 .errstr = "combined stack size",
/* Same overflow but the caller's deep store happens after the call returns —
 * still "combined stack size".
 */
738 "calls: stack overflow using two frames (post-call access)",
741 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
742 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
746 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
747 BPF_MOV64_IMM(BPF_REG_0, 0),
750 .prog_type = BPF_PROG_TYPE_XDP,
751 .errstr = "combined stack size",
/* main -> A -> B; per the in-file comment the worst-case combined depth stays
 * under 512, so this is expected to pass the depth check.
 */
755 "calls: stack depth check using three frames. test1",
758 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
759 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
760 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
761 BPF_MOV64_IMM(BPF_REG_0, 0),
764 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
767 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
768 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
771 .prog_type = BPF_PROG_TYPE_XDP,
772 /* stack_main=32, stack_A=256, stack_B=64
773 * and max(main+A, main+A+B) < 512
 */
/* Same shape with A/B stack sizes swapped; still under the 512 limit. */
778 "calls: stack depth check using three frames. test2",
781 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
782 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
783 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
784 BPF_MOV64_IMM(BPF_REG_0, 0),
787 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
790 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
791 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
794 .prog_type = BPF_PROG_TYPE_XDP,
795 /* stack_main=32, stack_A=64, stack_B=256
796 * and max(main+A, main+A+B) < 512
 */
/* Larger frames (64+224+256) push the worst case over 512 — rejected with
 * "combined stack".
 */
801 "calls: stack depth check using three frames. test3",
804 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
805 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
807 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
808 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
809 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
810 BPF_MOV64_IMM(BPF_REG_0, 0),
813 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
815 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
816 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
818 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
819 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
820 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
823 .prog_type = BPF_PROG_TYPE_XDP,
824 /* stack_main=64, stack_A=224, stack_B=256
825 * and max(main+A, main+A+B) > 512
 */
827 .errstr = "combined stack",
/* Per the pseudo-code below: each function either allocates 300 bytes or
 * recurses into the next; verifier must account for the worst case —
 * rejected with "combined stack".
 */
831 "calls: stack depth check using three frames. test4",
837 * void func1(int alloc_or_recurse) {
838 * if (alloc_or_recurse) {
839 * frame_pointer[-300] = 1;
841 * func2(alloc_or_recurse);
844 * void func2(int alloc_or_recurse) {
845 * if (alloc_or_recurse) {
846 * frame_pointer[-300] = 1;
 */
852 BPF_MOV64_IMM(BPF_REG_1, 0),
853 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
854 BPF_MOV64_IMM(BPF_REG_1, 1),
855 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
856 BPF_MOV64_IMM(BPF_REG_1, 1),
857 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
858 BPF_MOV64_IMM(BPF_REG_0, 0),
861 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
862 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
864 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
867 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
868 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
871 .prog_type = BPF_PROG_TYPE_XDP,
873 .errstr = "combined stack",
/* An eight-deep call chain (A..H) exceeds the maximum call-frame depth —
 * rejected with "call stack".
 */
876 "calls: stack depth check using three frames. test5",
879 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
882 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
885 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
888 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
891 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
894 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
897 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
900 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
903 BPF_MOV64_IMM(BPF_REG_0, 0),
906 .prog_type = BPF_PROG_TYPE_XDP,
907 .errstr = "call stack",
/* Callee stores its own stack pointer (r1, itself a caller-frame ptr) into
 * the caller's frame — rejected with "cannot spill".
 */
911 "calls: spill into caller stack frame",
913 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
914 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
916 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
918 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
919 BPF_MOV64_IMM(BPF_REG_0, 0),
922 .prog_type = BPF_PROG_TYPE_XDP,
923 .errstr = "cannot spill",
/* Callee writes a scalar (42) through a caller-frame pointer; caller reads it
 * back. No .errstr visible in this fragment (presumably ACCEPT — TODO
 * confirm in full file).
 */
927 "calls: write into caller stack frame",
929 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
931 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
932 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
933 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
935 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
936 BPF_MOV64_IMM(BPF_REG_0, 0),
939 .prog_type = BPF_PROG_TYPE_XDP,
/* Callee returns a pointer into its own (dead) stack frame; the caller's
 * store through it is rejected — "cannot return stack pointer".
 */
944 "calls: write into callee stack frame",
946 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
947 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
949 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
953 .prog_type = BPF_PROG_TYPE_XDP,
954 .errstr = "cannot return stack pointer",
/* Innermost callee is void (exits without setting r0) but writes through the
 * caller-frame pointer. No .errstr visible in this fragment (presumably
 * ACCEPT — TODO confirm in full file).
 */
958 "calls: two calls with stack write and void return",
961 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
962 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
964 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
966 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
967 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
971 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
972 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
973 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
974 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
975 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
979 /* write into stack frame of main prog */
980 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
981 BPF_EXIT_INSN(), /* void return */
983 .prog_type = BPF_PROG_TYPE_XDP,
/* One callee returns a ctx pointer, the other a void/uninit r0; using the
 * merged value is rejected ("R0 !read_ok"), and the whole construct is
 * root-only for unprivileged loaders.
 */
987 "calls: ambiguous return value",
989 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
990 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
991 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
994 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
996 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
997 BPF_MOV64_IMM(BPF_REG_0, 0),
1000 .errstr_unpriv = "allowed for root only",
1001 .result_unpriv = REJECT,
1002 .errstr = "R0 !read_ok",
/* Third function does a map lookup and stores the map_value pointer into the
 * main prog's frame; main then NULL-checks each slot before writing. No
 * .result visible in this fragment (presumably ACCEPT — TODO confirm).
 */
1006 "calls: two calls that return map_value",
1009 /* pass fp-16, fp-8 into a function */
1010 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1012 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
1016 /* fetch map_value_ptr from the stack of this function */
1017 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1018 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1019 /* write into map value */
1020 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1021 /* fetch secound map_value_ptr from the stack */
1022 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1023 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1024 /* write into map value */
1025 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1026 BPF_MOV64_IMM(BPF_REG_0, 0),
1030 /* call 3rd function twice */
1031 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1032 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1033 /* first time with fp-8 */
1034 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1036 /* second time with fp-16 */
1037 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1041 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1042 /* lookup from map */
1043 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1044 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1045 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1046 BPF_LD_MAP_FD(BPF_REG_1, 0),
1047 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1048 /* write map_value_ptr into stack frame of main prog */
1049 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1050 BPF_MOV64_IMM(BPF_REG_0, 0),
1051 BPF_EXIT_INSN(), /* return 0 */
1053 .prog_type = BPF_PROG_TYPE_XDP,
1054 .fixup_map_hash_8b = { 23 },
/* Variant where the lookup helper returns 0/1 and the caller only
 * dereferences the stored map_value pointer when r0 == 1. No .result visible
 * in this fragment (presumably ACCEPT — TODO confirm).
 */
1058 "calls: two calls that return map_value with bool condition",
1061 /* pass fp-16, fp-8 into a function */
1062 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1064 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1065 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1066 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1067 BPF_MOV64_IMM(BPF_REG_0, 0),
1071 /* call 3rd function twice */
1072 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1073 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1074 /* first time with fp-8 */
1075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1076 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1077 /* fetch map_value_ptr from the stack of this function */
1078 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1079 /* write into map value */
1080 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1081 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1082 /* second time with fp-16 */
1083 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1084 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1085 /* fetch secound map_value_ptr from the stack */
1086 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1087 /* write into map value */
1088 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1092 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1093 /* lookup from map */
1094 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1095 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1097 BPF_LD_MAP_FD(BPF_REG_1, 0),
1098 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1099 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1100 BPF_MOV64_IMM(BPF_REG_0, 0),
1101 BPF_EXIT_INSN(), /* return 0 */
1102 /* write map_value_ptr into stack frame of main prog */
1103 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1104 BPF_MOV64_IMM(BPF_REG_0, 1),
1105 BPF_EXIT_INSN(), /* return 1 */
1107 .prog_type = BPF_PROG_TYPE_XDP,
1108 .fixup_map_hash_8b = { 23 },
/* Like the bool-condition test, but the second check tests r0 != 0 instead of
 * != 1, so fp-16 may be read while still uninitialized — rejected with
 * "invalid read from stack off -16+0 size 8".
 */
1112 "calls: two calls that return map_value with incorrect bool check",
1115 /* pass fp-16, fp-8 into a function */
1116 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1118 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1120 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1121 BPF_MOV64_IMM(BPF_REG_0, 0),
1125 /* call 3rd function twice */
1126 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1127 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1128 /* first time with fp-8 */
1129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1130 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1131 /* fetch map_value_ptr from the stack of this function */
1132 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1133 /* write into map value */
1134 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1135 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1136 /* second time with fp-16 */
1137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1138 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1139 /* fetch secound map_value_ptr from the stack */
1140 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1141 /* write into map value */
1142 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1146 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1147 /* lookup from map */
1148 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1149 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1150 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1151 BPF_LD_MAP_FD(BPF_REG_1, 0),
1152 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1153 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1154 BPF_MOV64_IMM(BPF_REG_0, 0),
1155 BPF_EXIT_INSN(), /* return 0 */
1156 /* write map_value_ptr into stack frame of main prog */
1157 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1158 BPF_MOV64_IMM(BPF_REG_0, 1),
1159 BPF_EXIT_INSN(), /* return 1 */
1161 .prog_type = BPF_PROG_TYPE_XDP,
1162 .fixup_map_hash_8b = { 23 },
1164 .errstr = "invalid read from stack off -16+0 size 8",
/* Flags in r8/r9 track whether fp-8 / fp-16 hold valid map_value pointers;
 * the final write uses offset 2 with size 8 into an 8-byte value — rejected
 * with the .errstr below.
 */
1167 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
1170 /* pass fp-16, fp-8 into a function */
1171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1173 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1175 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1176 BPF_MOV64_IMM(BPF_REG_0, 0),
1180 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1181 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1182 /* 1st lookup from map */
1183 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1184 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1185 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1186 BPF_LD_MAP_FD(BPF_REG_1, 0),
1187 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1188 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1189 BPF_MOV64_IMM(BPF_REG_8, 0),
1190 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1191 /* write map_value_ptr into stack frame of main prog at fp-8 */
1192 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1193 BPF_MOV64_IMM(BPF_REG_8, 1),
1195 /* 2nd lookup from map */
1196 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1197 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1198 BPF_LD_MAP_FD(BPF_REG_1, 0),
1199 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1200 BPF_FUNC_map_lookup_elem),
1201 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1202 BPF_MOV64_IMM(BPF_REG_9, 0),
1203 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1204 /* write map_value_ptr into stack frame of main prog at fp-16 */
1205 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1206 BPF_MOV64_IMM(BPF_REG_9, 1),
1208 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1209 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1210 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1211 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1212 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1213 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
1217 /* if arg2 == 1 do *arg1 = 0 */
1218 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1219 /* fetch map_value_ptr from the stack of this function */
1220 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1221 /* write into map value */
1222 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1224 /* if arg4 == 1 do *arg3 = 0 */
1225 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1226 /* fetch map_value_ptr from the stack of this function */
1227 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1228 /* write into map value */
1229 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
1232 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1233 .fixup_map_hash_8b = { 12, 22 },
1235 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1236 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Same as test1 but the final map-value write uses offset 0, which fits the
 * 8-byte value. No .errstr visible in this fragment (presumably ACCEPT —
 * TODO confirm in full file).
 */
1239 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
1242 /* pass fp-16, fp-8 into a function */
1243 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1245 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1248 BPF_MOV64_IMM(BPF_REG_0, 0),
1252 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1253 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1254 /* 1st lookup from map */
1255 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1256 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1257 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1258 BPF_LD_MAP_FD(BPF_REG_1, 0),
1259 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1260 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1261 BPF_MOV64_IMM(BPF_REG_8, 0),
1262 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1263 /* write map_value_ptr into stack frame of main prog at fp-8 */
1264 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1265 BPF_MOV64_IMM(BPF_REG_8, 1),
1267 /* 2nd lookup from map */
1268 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1270 BPF_LD_MAP_FD(BPF_REG_1, 0),
1271 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1272 BPF_FUNC_map_lookup_elem),
1273 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1274 BPF_MOV64_IMM(BPF_REG_9, 0),
1275 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1276 /* write map_value_ptr into stack frame of main prog at fp-16 */
1277 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1278 BPF_MOV64_IMM(BPF_REG_9, 1),
1280 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1281 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1282 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1283 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1284 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
1289 /* if arg2 == 1 do *arg1 = 0 */
1290 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1291 /* fetch map_value_ptr from the stack of this function */
1292 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1293 /* write into map value */
1294 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1296 /* if arg4 == 1 do *arg3 = 0 */
1297 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1298 /* fetch map_value_ptr from the stack of this function */
1299 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1300 /* write into map value */
1301 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1304 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1305 .fixup_map_hash_8b = { 12, 22 },
/*
 * Same pointer-through-caller-stack pattern as the preceding tests, but
 * the "subprograms" are reached with BPF_JA jumps inside one function
 * instead of bpf-to-bpf calls. The final 8-byte write at offset 2
 * overflows the 8-byte map value, so the verifier must reject it
 * (see .errstr: "off=2 size=8").
 */
1309 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
1312 /* pass fp-16, fp-8 into a function */
1313 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1315 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1317 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
1318 BPF_MOV64_IMM(BPF_REG_0, 0),
1322 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1323 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1324 /* 1st lookup from map */
1325 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
1326 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1328 BPF_LD_MAP_FD(BPF_REG_1, 0),
1329 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1330 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1331 BPF_MOV64_IMM(BPF_REG_8, 0),
1332 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1333 /* write map_value_ptr into stack frame of main prog at fp-8 */
1334 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1335 BPF_MOV64_IMM(BPF_REG_8, 1),
1337 /* 2nd lookup from map */
1338 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1340 BPF_LD_MAP_FD(BPF_REG_1, 0),
1341 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1342 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1343 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
1344 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1345 /* write map_value_ptr into stack frame of main prog at fp-16 */
1346 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1347 BPF_MOV64_IMM(BPF_REG_9, 1),
1349 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1350 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
1351 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1352 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1353 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1354 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
1355 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
1358 /* if arg2 == 1 do *arg1 = 0 */
1359 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1360 /* fetch map_value_ptr from the stack of this function */
1361 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1362 /* write into map value */
1363 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1365 /* if arg4 == 1 do *arg3 = 0 */
1366 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1367 /* fetch map_value_ptr from the stack of this function */
1368 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1369 /* write into map value, offset 2: out of bounds for value_size=8 */
1370 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
1371 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
1373 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1374 .fixup_map_hash_8b = { 12, 22 },
1376 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1377 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * Variant where the lookup result is spilled to the caller's stack
 * BEFORE the NULL check, so the stack slot holds map_value_or_null.
 * The r8/r9 flags still track whether the lookup succeeded, and the
 * final function only dereferences when the flag is 1 — so both writes
 * (offset 0) are on proven-non-NULL pointers. No .errstr is present in
 * this excerpt; presumably accepted — confirm .result in full file.
 */
1380 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
1383 /* pass fp-16, fp-8 into a function */
1384 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1385 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1386 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1388 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1389 BPF_MOV64_IMM(BPF_REG_0, 0),
1393 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1394 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1395 /* 1st lookup from map */
1396 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1397 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1398 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1399 BPF_LD_MAP_FD(BPF_REG_1, 0),
1400 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1401 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1402 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1403 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1404 BPF_MOV64_IMM(BPF_REG_8, 0),
1405 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1406 BPF_MOV64_IMM(BPF_REG_8, 1),
1408 /* 2nd lookup from map */
1409 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1411 BPF_LD_MAP_FD(BPF_REG_1, 0),
1412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1413 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1414 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1415 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1416 BPF_MOV64_IMM(BPF_REG_9, 0),
1417 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1418 BPF_MOV64_IMM(BPF_REG_9, 1),
1420 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1421 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1422 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1423 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1424 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1429 /* if arg2 == 1 do *arg1 = 0 */
1430 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1431 /* fetch map_value_ptr from the stack of this function */
1432 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1433 /* write into map value */
1434 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1436 /* if arg4 == 1 do *arg3 = 0 */
1437 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1438 /* fetch map_value_ptr from the stack of this function */
1439 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1440 /* write into map value */
1441 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1444 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1445 .fixup_map_hash_8b = { 12, 22 },
/*
 * Like test1 above, except the last function dereferences *arg3 when
 * arg4 == 0 — i.e. exactly when the r9 flag says the second lookup
 * returned NULL. The spilled slot may then hold NULL/scalar, so the
 * verifier must reject the load (see .errstr: "R0 invalid mem access").
 */
1452 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
1453 /* pass fp-16, fp-8 into a function */
1454 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1456 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1459 BPF_MOV64_IMM(BPF_REG_0, 0),
1462 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1463 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1464 /* 1st lookup from map */
1465 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1466 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1467 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1468 BPF_LD_MAP_FD(BPF_REG_1, 0),
1469 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1470 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1471 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1472 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1473 BPF_MOV64_IMM(BPF_REG_8, 0),
1474 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1475 BPF_MOV64_IMM(BPF_REG_8, 1),
1477 /* 2nd lookup from map */
1478 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1480 BPF_LD_MAP_FD(BPF_REG_1, 0),
1481 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1482 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1483 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1484 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1485 BPF_MOV64_IMM(BPF_REG_9, 0),
1486 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1487 BPF_MOV64_IMM(BPF_REG_9, 1),
1489 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1490 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1491 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1492 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1493 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1494 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1498 /* if arg2 == 1 do *arg1 = 0 */
1499 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1500 /* fetch map_value_ptr from the stack of this function */
1501 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1502 /* write into map value */
1503 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1505 /* if arg4 == 0 do *arg3 = 0 — i.e. on the lookup-failed path: bad */
1506 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
1507 /* fetch map_value_ptr from the stack of this function */
1508 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1509 /* write into map value */
1510 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1513 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1514 .fixup_map_hash_8b = { 12, 22 },
1516 .errstr = "R0 invalid mem access 'inv'",
/*
 * Subprog spills a NOT-yet-range-checked pkt_ptr into the caller's
 * stack (via r4 = caller fp-8), then performs the data_end check before
 * reading the spill back and writing to the packet. The spill itself is
 * unchecked, but every dereference happens after the bounds check.
 * NOTE(review): .result is not visible here; .retval = POINTER_VALUE
 * suggests the program is accepted and run — confirm in full file.
 */
1519 "calls: pkt_ptr spill into caller stack",
1521 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1522 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1527 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1528 offsetof(struct __sk_buff, data)),
1529 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1530 offsetof(struct __sk_buff, data_end)),
1531 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1532 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1533 /* spill unchecked pkt_ptr into stack of caller */
1534 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1535 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1536 /* now the pkt range is verified, read pkt_ptr from stack */
1537 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1538 /* write 4 bytes into packet */
1539 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1543 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1544 .retval = POINTER_VALUE,
1545 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * Same spill pattern, but the CALLER reads the spilled pkt_ptr back
 * from fp-8 and writes through it unconditionally after the call. On
 * the path where the subprog's range check failed, the spilled pointer
 * was never verified, so the verifier must reject
 * (see .errstr: "invalid access to packet").
 */
1548 "calls: pkt_ptr spill into caller stack 2",
1550 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1552 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1553 /* Marking is still kept, but not in all cases safe. */
1554 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1555 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1559 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1560 offsetof(struct __sk_buff, data)),
1561 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1562 offsetof(struct __sk_buff, data_end)),
1563 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1565 /* spill unchecked pkt_ptr into stack of caller */
1566 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1567 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1568 /* now the pkt range is verified, read pkt_ptr from stack */
1569 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1570 /* write 4 bytes into packet */
1571 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1574 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1575 .errstr = "invalid access to packet",
1577 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * As above, but the subprog returns r5 = 1 only on the range-checked
 * path (r5 = 0 otherwise), and the caller dereferences the spilled
 * pkt_ptr only when r0 != 0. The spill marking is therefore used only
 * where it is safe. NOTE(review): .result is not in this excerpt —
 * presumably accepted; confirm in full file.
 */
1582 "calls: pkt_ptr spill into caller stack 3",
1582 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1583 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1584 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1586 /* Marking is still kept and safe here. */
1587 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1588 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1592 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1593 offsetof(struct __sk_buff, data)),
1594 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1595 offsetof(struct __sk_buff, data_end)),
1596 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1598 /* spill unchecked pkt_ptr into stack of caller */
1599 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1600 BPF_MOV64_IMM(BPF_REG_5, 0),
1601 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1602 BPF_MOV64_IMM(BPF_REG_5, 1),
1603 /* now the pkt range is verified, read pkt_ptr from stack */
1604 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1605 /* write 4 bytes into packet */
1606 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1607 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1610 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1613 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * Like test 3, but the subprog does NOT read the spill back itself —
 * it writes through its live r2 after the check. The caller then reads
 * fp-8 on the r0 != 0 path; this checks that the spill's verified-range
 * marking propagates back to the caller. NOTE(review): .result is not
 * in this excerpt — confirm in full file.
 */
1616 "calls: pkt_ptr spill into caller stack 4",
1618 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1622 /* Check marking propagated. */
1623 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1624 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1628 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1629 offsetof(struct __sk_buff, data)),
1630 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1631 offsetof(struct __sk_buff, data_end)),
1632 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1633 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1634 /* spill unchecked pkt_ptr into stack of caller */
1635 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1636 BPF_MOV64_IMM(BPF_REG_5, 0),
1637 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1638 BPF_MOV64_IMM(BPF_REG_5, 1),
1639 /* don't read back pkt_ptr from stack here */
1640 /* write 4 bytes into packet */
1641 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1642 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1645 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1648 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * The caller pre-fills fp-8 with the ctx pointer (r1); the subprog
 * overwrites that slot with a range-checked pkt_ptr only on one path.
 * The caller's single load at fp-8 would thus see two different pointer
 * types depending on path — rejected with
 * "same insn cannot be used with different" (pointer types).
 */
1651 "calls: pkt_ptr spill into caller stack 5",
1653 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1654 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1655 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
1656 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1657 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1658 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1662 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1663 offsetof(struct __sk_buff, data)),
1664 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1665 offsetof(struct __sk_buff, data_end)),
1666 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1667 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1668 BPF_MOV64_IMM(BPF_REG_5, 0),
1669 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1670 /* spill checked pkt_ptr into stack of caller */
1671 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1672 BPF_MOV64_IMM(BPF_REG_5, 1),
1673 /* don't read back pkt_ptr from stack here */
1674 /* write 4 bytes into packet */
1675 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1676 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1679 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1680 .errstr = "same insn cannot be used with different",
1682 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * Same shape as test 5 but fp-8 is pre-filled with data_end (a pkt_end
 * pointer) instead of ctx. After the call, loading through the slot on
 * the not-overwritten path dereferences pkt_end — rejected with
 * "R4 invalid mem access".
 */
1685 "calls: pkt_ptr spill into caller stack 6",
1687 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1688 offsetof(struct __sk_buff, data_end)),
1689 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1690 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1691 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1693 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1694 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1698 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1699 offsetof(struct __sk_buff, data)),
1700 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1701 offsetof(struct __sk_buff, data_end)),
1702 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1703 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1704 BPF_MOV64_IMM(BPF_REG_5, 0),
1705 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1706 /* spill checked pkt_ptr into stack of caller */
1707 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1708 BPF_MOV64_IMM(BPF_REG_5, 1),
1709 /* don't read back pkt_ptr from stack here */
1710 /* write 4 bytes into packet */
1711 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1712 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1715 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1716 .errstr = "R4 invalid mem access",
1718 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * Same shape again but fp-8 is pre-filled with the scalar 0 (r2 = 0).
 * On the path where the subprog did not overwrite the slot, the caller
 * loads a scalar and dereferences it — rejected with
 * "R4 invalid mem access".
 */
1721 "calls: pkt_ptr spill into caller stack 7",
1723 BPF_MOV64_IMM(BPF_REG_2, 0),
1724 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1725 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1726 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1727 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1728 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1729 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1733 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1734 offsetof(struct __sk_buff, data)),
1735 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1736 offsetof(struct __sk_buff, data_end)),
1737 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1739 BPF_MOV64_IMM(BPF_REG_5, 0),
1740 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1741 /* spill checked pkt_ptr into stack of caller */
1742 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1743 BPF_MOV64_IMM(BPF_REG_5, 1),
1744 /* don't read back pkt_ptr from stack here */
1745 /* write 4 bytes into packet */
1746 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1747 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1750 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1751 .errstr = "R4 invalid mem access",
1753 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * The caller itself range-checks the pkt_ptr (JLE against data_end)
 * BEFORE spilling it to fp-8, so both possible contents of the slot
 * after the call are verified pkt pointers. No .errstr is present in
 * this excerpt — presumably accepted; confirm .result in full file.
 */
1756 "calls: pkt_ptr spill into caller stack 8",
1758 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1759 offsetof(struct __sk_buff, data)),
1760 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1761 offsetof(struct __sk_buff, data_end)),
1762 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1763 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1764 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
1766 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1768 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1769 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1770 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1771 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1775 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1776 offsetof(struct __sk_buff, data)),
1777 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1778 offsetof(struct __sk_buff, data_end)),
1779 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1781 BPF_MOV64_IMM(BPF_REG_5, 0),
1782 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1783 /* spill checked pkt_ptr into stack of caller */
1784 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1785 BPF_MOV64_IMM(BPF_REG_5, 1),
1786 /* don't read back pkt_ptr from stack here */
1787 /* write 4 bytes into packet */
1788 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1789 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1792 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1794 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * Like test 8, the caller spills a checked pkt_ptr — but the subprog
 * overwrites the same slot with an UNCHECKED pkt_ptr before its own
 * JGT range check. On the check-failed path the slot holds an
 * unverified pointer the caller then dereferences — rejected with
 * "invalid access to packet".
 */
1797 "calls: pkt_ptr spill into caller stack 9",
1799 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1800 offsetof(struct __sk_buff, data)),
1801 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1802 offsetof(struct __sk_buff, data_end)),
1803 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1805 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
1807 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1808 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1809 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1811 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1812 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1816 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1817 offsetof(struct __sk_buff, data)),
1818 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1819 offsetof(struct __sk_buff, data_end)),
1820 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1822 BPF_MOV64_IMM(BPF_REG_5, 0),
1823 /* spill unchecked pkt_ptr into stack of caller */
1824 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1825 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1826 BPF_MOV64_IMM(BPF_REG_5, 1),
1827 /* don't read back pkt_ptr from stack here */
1828 /* write 4 bytes into packet */
1829 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1830 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1833 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1834 .errstr = "invalid access to packet",
1836 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * The caller zero-initializes fp-8; the subprog either returns without
 * touching it (ctx == 0 path) or stores a map_value_or_null there.
 * The caller must therefore treat fp-8 as "zero or ptr_or_null", and
 * only writes through it after the != 0 check. XDP program; the map fd
 * fixup patches the subprog's BPF_LD_MAP_FD (insn 13).
 * NOTE(review): .result is not visible in this excerpt.
 */
1839 "calls: caller stack init to zero or map_value_or_null",
1841 BPF_MOV64_IMM(BPF_REG_0, 0),
1842 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
1843 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1846 /* fetch map_value_or_null or const_zero from stack */
1847 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1848 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1849 /* store into map_value */
1850 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
1854 /* if (ctx == 0) return; */
1855 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
1856 /* else bpf_map_lookup() and *(fp - 8) = r0 */
1857 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
1858 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1860 BPF_LD_MAP_FD(BPF_REG_1, 0),
1861 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1862 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1863 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1864 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1867 .fixup_map_hash_8b = { 13 },
1869 .prog_type = BPF_PROG_TYPE_XDP,
/*
 * Pruning/liveness test: fp-8 is zero-initialized on only one branch;
 * if liveness marks from the map_lookup argument were missing, state
 * pruning would wrongly accept the branch that skipped the init. The
 * verifier must reject with "invalid indirect read from stack".
 */
1872 "calls: stack init to zero and pruning",
1874 /* first make allocated_stack 16 byte */
1875 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
1876 /* now fork the execution such that the false branch
1877 * of JGT insn will be verified second and it skips zero
1878 * init of fp-8 stack slot. If stack liveness marking
1879 * is missing live_read marks from call map_lookup
1880 * processing then pruning will incorrectly assume
1881 * that fp-8 stack slot was unused in the fall-through
1882 * branch and will accept the program incorrectly
1884 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
1885 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1886 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1887 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1889 BPF_LD_MAP_FD(BPF_REG_1, 0),
1890 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1893 .fixup_map_hash_48b = { 6 },
1894 .errstr = "invalid indirect read from stack off -8+0 size 8",
1896 .prog_type = BPF_PROG_TYPE_XDP,
/*
 * A subprog reads one byte from its first argument at entry; the main
 * prog invokes it (via a chain of bpf-to-bpf calls) with the saved ctx
 * pointer in r1. Rejected for unprivileged users only (bpf-to-bpf
 * calls are root-only, per .errstr_unpriv / .result_unpriv).
 * NOTE(review): the privileged .result field is not in this excerpt.
 */
1899 "calls: ctx read at start of subprog",
1901 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1902 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1903 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
1904 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1905 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1906 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1908 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
1909 BPF_MOV64_IMM(BPF_REG_0, 0),
1912 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1913 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
1914 .result_unpriv = REJECT,
/*
 * Cross-frame pruning test: r8 is 0 or 1 depending on a random value
 * from the caller's frame; the callee dereferences its scalar argument
 * (r1) only when r8 == 1 is false. Pruning across frames must not lose
 * that distinction — the bad path dereferences a scalar and must fail
 * with "!read_ok" (see .errstr).
 */
1918 "calls: cross frame pruning",
1925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1926 BPF_MOV64_IMM(BPF_REG_8, 0),
1927 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1928 BPF_MOV64_IMM(BPF_REG_8, 1),
1929 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
1930 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1931 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
1932 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
1933 BPF_MOV64_IMM(BPF_REG_0, 0),
1935 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1938 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1939 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
1940 .errstr = "!read_ok",
1944 "calls: cross frame pruning - liveness propagation",
1946 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1947 BPF_MOV64_IMM(BPF_REG_8, 0),
1948 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1949 BPF_MOV64_IMM(BPF_REG_8, 1),
1950 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1951 BPF_MOV64_IMM(BPF_REG_9, 0),
1952 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1953 BPF_MOV64_IMM(BPF_REG_9, 1),
1954 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1956 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
1957 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
1958 BPF_MOV64_IMM(BPF_REG_0, 0),
1960 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1963 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1964 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
1965 .errstr = "!read_ok",