/* "access skb fields ok": positive test — each word load below reads a
 * __sk_buff field that is readable from this program type.  The interleaved
 * BPF_JGE-against-#0 instructions (jump offsets 1 and 0) only add branches
 * between the loads so the verifier walks multiple paths over them.
 */
2 "access skb fields ok",
4 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5 offsetof(struct __sk_buff, len)),
6 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
7 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8 offsetof(struct __sk_buff, mark)),
9 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
10 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11 offsetof(struct __sk_buff, pkt_type)),
12 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
13 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14 offsetof(struct __sk_buff, queue_mapping)),
15 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
16 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
17 offsetof(struct __sk_buff, protocol)),
18 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
19 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
20 offsetof(struct __sk_buff, vlan_present)),
21 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
22 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
23 offsetof(struct __sk_buff, vlan_tci)),
24 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
25 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
26 offsetof(struct __sk_buff, napi_id)),
27 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
/* "access skb fields bad1": a load at a negative offset into the context
 * is outside struct __sk_buff and must be rejected.
 */
33 "access skb fields bad1",
35 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
38 .errstr = "invalid bpf_context access",
/* "access skb fields bad2": R1 starts as the ctx pointer; on one path a map
 * lookup result (map-value pointer) is moved into R1 before the ctx field
 * load, so at that load R1 may be either pointer type — the verifier must
 * reject with "different pointers".  The initial JGE compares the ctx
 * pointer itself, which unprivileged mode flags as "R1 pointer comparison".
 */
42 "access skb fields bad2",
44 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
45 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
46 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
47 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
48 BPF_LD_MAP_FD(BPF_REG_1, 0),
49 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
50 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
52 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
53 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
54 offsetof(struct __sk_buff, pkt_type)),
57 .fixup_map_hash_8b = { 4 },
58 .errstr = "different pointers",
59 .errstr_unpriv = "R1 pointer comparison",
/* "access skb fields bad3": same "different pointers" shape as bad2, but the
 * mixing happens via a backward jump (JA -12) that re-enters the ctx field
 * load after R1 has been overwritten with the map lookup result.
 */
63 "access skb fields bad3",
65 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
66 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
67 offsetof(struct __sk_buff, pkt_type)),
69 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
70 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
71 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
72 BPF_LD_MAP_FD(BPF_REG_1, 0),
73 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
74 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
76 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
77 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
79 .fixup_map_hash_8b = { 6 },
80 .errstr = "different pointers",
81 .errstr_unpriv = "R1 pointer comparison",
/* "access skb fields bad4": as bad3, but the backward jump (JA -13) lands on
 * a len load whose destination is R1 itself, again reaching a ctx access
 * with R1 holding either a ctx or a map-value pointer.
 */
85 "access skb fields bad4",
87 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
88 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
89 offsetof(struct __sk_buff, len)),
90 BPF_MOV64_IMM(BPF_REG_0, 0),
92 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
93 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
94 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
95 BPF_LD_MAP_FD(BPF_REG_1, 0),
96 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
97 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
99 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
100 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
102 .fixup_map_hash_8b = { 7 },
103 .errstr = "different pointers",
104 .errstr_unpriv = "R1 pointer comparison",
108 "invalid access __sk_buff family",
110 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
111 offsetof(struct __sk_buff, family)),
114 .errstr = "invalid bpf_context access",
118 "invalid access __sk_buff remote_ip4",
120 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
121 offsetof(struct __sk_buff, remote_ip4)),
124 .errstr = "invalid bpf_context access",
128 "invalid access __sk_buff local_ip4",
130 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
131 offsetof(struct __sk_buff, local_ip4)),
134 .errstr = "invalid bpf_context access",
138 "invalid access __sk_buff remote_ip6",
140 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
141 offsetof(struct __sk_buff, remote_ip6)),
144 .errstr = "invalid bpf_context access",
148 "invalid access __sk_buff local_ip6",
150 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
151 offsetof(struct __sk_buff, local_ip6)),
154 .errstr = "invalid bpf_context access",
158 "invalid access __sk_buff remote_port",
160 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
161 offsetof(struct __sk_buff, remote_port)),
164 .errstr = "invalid bpf_context access",
168 "invalid access __sk_buff remote_port",
170 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
171 offsetof(struct __sk_buff, local_port)),
174 .errstr = "invalid bpf_context access",
178 "valid access __sk_buff family",
180 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
181 offsetof(struct __sk_buff, family)),
185 .prog_type = BPF_PROG_TYPE_SK_SKB,
188 "valid access __sk_buff remote_ip4",
190 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
191 offsetof(struct __sk_buff, remote_ip4)),
195 .prog_type = BPF_PROG_TYPE_SK_SKB,
198 "valid access __sk_buff local_ip4",
200 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
201 offsetof(struct __sk_buff, local_ip4)),
205 .prog_type = BPF_PROG_TYPE_SK_SKB,
208 "valid access __sk_buff remote_ip6",
210 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
211 offsetof(struct __sk_buff, remote_ip6[0])),
212 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
213 offsetof(struct __sk_buff, remote_ip6[1])),
214 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
215 offsetof(struct __sk_buff, remote_ip6[2])),
216 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
217 offsetof(struct __sk_buff, remote_ip6[3])),
221 .prog_type = BPF_PROG_TYPE_SK_SKB,
224 "valid access __sk_buff local_ip6",
226 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
227 offsetof(struct __sk_buff, local_ip6[0])),
228 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
229 offsetof(struct __sk_buff, local_ip6[1])),
230 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
231 offsetof(struct __sk_buff, local_ip6[2])),
232 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
233 offsetof(struct __sk_buff, local_ip6[3])),
237 .prog_type = BPF_PROG_TYPE_SK_SKB,
240 "valid access __sk_buff remote_port",
242 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
243 offsetof(struct __sk_buff, remote_port)),
247 .prog_type = BPF_PROG_TYPE_SK_SKB,
250 "valid access __sk_buff remote_port",
252 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
253 offsetof(struct __sk_buff, local_port)),
257 .prog_type = BPF_PROG_TYPE_SK_SKB,
260 "invalid access of tc_classid for SK_SKB",
262 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
263 offsetof(struct __sk_buff, tc_classid)),
267 .prog_type = BPF_PROG_TYPE_SK_SKB,
268 .errstr = "invalid bpf_context access",
271 "invalid access of skb->mark for SK_SKB",
273 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
274 offsetof(struct __sk_buff, mark)),
278 .prog_type = BPF_PROG_TYPE_SK_SKB,
279 .errstr = "invalid bpf_context access",
282 "check skb->mark is not writeable by SK_SKB",
284 BPF_MOV64_IMM(BPF_REG_0, 0),
285 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
286 offsetof(struct __sk_buff, mark)),
290 .prog_type = BPF_PROG_TYPE_SK_SKB,
291 .errstr = "invalid bpf_context access",
294 "check skb->tc_index is writeable by SK_SKB",
296 BPF_MOV64_IMM(BPF_REG_0, 0),
297 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
298 offsetof(struct __sk_buff, tc_index)),
302 .prog_type = BPF_PROG_TYPE_SK_SKB,
305 "check skb->priority is writeable by SK_SKB",
307 BPF_MOV64_IMM(BPF_REG_0, 0),
308 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
309 offsetof(struct __sk_buff, priority)),
313 .prog_type = BPF_PROG_TYPE_SK_SKB,
316 "direct packet read for SK_SKB",
318 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
319 offsetof(struct __sk_buff, data)),
320 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
321 offsetof(struct __sk_buff, data_end)),
322 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
323 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
324 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
325 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
326 BPF_MOV64_IMM(BPF_REG_0, 0),
330 .prog_type = BPF_PROG_TYPE_SK_SKB,
333 "direct packet write for SK_SKB",
335 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
336 offsetof(struct __sk_buff, data)),
337 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
338 offsetof(struct __sk_buff, data_end)),
339 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
341 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
342 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
343 BPF_MOV64_IMM(BPF_REG_0, 0),
347 .prog_type = BPF_PROG_TYPE_SK_SKB,
350 "overlapping checks for direct packet access SK_SKB",
352 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
353 offsetof(struct __sk_buff, data)),
354 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
355 offsetof(struct __sk_buff, data_end)),
356 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
358 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
359 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
361 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
362 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
363 BPF_MOV64_IMM(BPF_REG_0, 0),
367 .prog_type = BPF_PROG_TYPE_SK_SKB,
370 "check skb->mark is not writeable by sockets",
372 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
373 offsetof(struct __sk_buff, mark)),
376 .errstr = "invalid bpf_context access",
377 .errstr_unpriv = "R1 leaks addr",
381 "check skb->tc_index is not writeable by sockets",
383 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
384 offsetof(struct __sk_buff, tc_index)),
387 .errstr = "invalid bpf_context access",
388 .errstr_unpriv = "R1 leaks addr",
392 "check cb access: byte",
394 BPF_MOV64_IMM(BPF_REG_0, 0),
395 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
396 offsetof(struct __sk_buff, cb[0])),
397 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
398 offsetof(struct __sk_buff, cb[0]) + 1),
399 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
400 offsetof(struct __sk_buff, cb[0]) + 2),
401 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
402 offsetof(struct __sk_buff, cb[0]) + 3),
403 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
404 offsetof(struct __sk_buff, cb[1])),
405 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
406 offsetof(struct __sk_buff, cb[1]) + 1),
407 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
408 offsetof(struct __sk_buff, cb[1]) + 2),
409 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
410 offsetof(struct __sk_buff, cb[1]) + 3),
411 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
412 offsetof(struct __sk_buff, cb[2])),
413 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
414 offsetof(struct __sk_buff, cb[2]) + 1),
415 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
416 offsetof(struct __sk_buff, cb[2]) + 2),
417 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
418 offsetof(struct __sk_buff, cb[2]) + 3),
419 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
420 offsetof(struct __sk_buff, cb[3])),
421 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
422 offsetof(struct __sk_buff, cb[3]) + 1),
423 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
424 offsetof(struct __sk_buff, cb[3]) + 2),
425 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
426 offsetof(struct __sk_buff, cb[3]) + 3),
427 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
428 offsetof(struct __sk_buff, cb[4])),
429 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
430 offsetof(struct __sk_buff, cb[4]) + 1),
431 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
432 offsetof(struct __sk_buff, cb[4]) + 2),
433 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
434 offsetof(struct __sk_buff, cb[4]) + 3),
435 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
436 offsetof(struct __sk_buff, cb[0])),
437 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
438 offsetof(struct __sk_buff, cb[0]) + 1),
439 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
440 offsetof(struct __sk_buff, cb[0]) + 2),
441 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
442 offsetof(struct __sk_buff, cb[0]) + 3),
443 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
444 offsetof(struct __sk_buff, cb[1])),
445 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
446 offsetof(struct __sk_buff, cb[1]) + 1),
447 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
448 offsetof(struct __sk_buff, cb[1]) + 2),
449 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
450 offsetof(struct __sk_buff, cb[1]) + 3),
451 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
452 offsetof(struct __sk_buff, cb[2])),
453 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
454 offsetof(struct __sk_buff, cb[2]) + 1),
455 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
456 offsetof(struct __sk_buff, cb[2]) + 2),
457 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
458 offsetof(struct __sk_buff, cb[2]) + 3),
459 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
460 offsetof(struct __sk_buff, cb[3])),
461 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
462 offsetof(struct __sk_buff, cb[3]) + 1),
463 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
464 offsetof(struct __sk_buff, cb[3]) + 2),
465 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
466 offsetof(struct __sk_buff, cb[3]) + 3),
467 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
468 offsetof(struct __sk_buff, cb[4])),
469 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
470 offsetof(struct __sk_buff, cb[4]) + 1),
471 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
472 offsetof(struct __sk_buff, cb[4]) + 2),
473 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
474 offsetof(struct __sk_buff, cb[4]) + 3),
480 "__sk_buff->hash, offset 0, byte store not permitted",
482 BPF_MOV64_IMM(BPF_REG_0, 0),
483 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
484 offsetof(struct __sk_buff, hash)),
487 .errstr = "invalid bpf_context access",
491 "__sk_buff->tc_index, offset 3, byte store not permitted",
493 BPF_MOV64_IMM(BPF_REG_0, 0),
494 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
495 offsetof(struct __sk_buff, tc_index) + 3),
498 .errstr = "invalid bpf_context access",
502 "check skb->hash byte load permitted",
504 BPF_MOV64_IMM(BPF_REG_0, 0),
505 #if __BYTE_ORDER == __LITTLE_ENDIAN
506 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
507 offsetof(struct __sk_buff, hash)),
509 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
510 offsetof(struct __sk_buff, hash) + 3),
517 "check skb->hash byte load permitted 1",
519 BPF_MOV64_IMM(BPF_REG_0, 0),
520 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
521 offsetof(struct __sk_buff, hash) + 1),
527 "check skb->hash byte load permitted 2",
529 BPF_MOV64_IMM(BPF_REG_0, 0),
530 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
531 offsetof(struct __sk_buff, hash) + 2),
537 "check skb->hash byte load permitted 3",
539 BPF_MOV64_IMM(BPF_REG_0, 0),
540 #if __BYTE_ORDER == __LITTLE_ENDIAN
541 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
542 offsetof(struct __sk_buff, hash) + 3),
544 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
545 offsetof(struct __sk_buff, hash)),
552 "check cb access: byte, wrong type",
554 BPF_MOV64_IMM(BPF_REG_0, 0),
555 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
556 offsetof(struct __sk_buff, cb[0])),
559 .errstr = "invalid bpf_context access",
561 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
564 "check cb access: half",
566 BPF_MOV64_IMM(BPF_REG_0, 0),
567 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
568 offsetof(struct __sk_buff, cb[0])),
569 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
570 offsetof(struct __sk_buff, cb[0]) + 2),
571 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
572 offsetof(struct __sk_buff, cb[1])),
573 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
574 offsetof(struct __sk_buff, cb[1]) + 2),
575 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
576 offsetof(struct __sk_buff, cb[2])),
577 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
578 offsetof(struct __sk_buff, cb[2]) + 2),
579 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
580 offsetof(struct __sk_buff, cb[3])),
581 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
582 offsetof(struct __sk_buff, cb[3]) + 2),
583 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
584 offsetof(struct __sk_buff, cb[4])),
585 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
586 offsetof(struct __sk_buff, cb[4]) + 2),
587 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
588 offsetof(struct __sk_buff, cb[0])),
589 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
590 offsetof(struct __sk_buff, cb[0]) + 2),
591 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
592 offsetof(struct __sk_buff, cb[1])),
593 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
594 offsetof(struct __sk_buff, cb[1]) + 2),
595 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
596 offsetof(struct __sk_buff, cb[2])),
597 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
598 offsetof(struct __sk_buff, cb[2]) + 2),
599 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
600 offsetof(struct __sk_buff, cb[3])),
601 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
602 offsetof(struct __sk_buff, cb[3]) + 2),
603 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
604 offsetof(struct __sk_buff, cb[4])),
605 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
606 offsetof(struct __sk_buff, cb[4]) + 2),
612 "check cb access: half, unaligned",
614 BPF_MOV64_IMM(BPF_REG_0, 0),
615 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
616 offsetof(struct __sk_buff, cb[0]) + 1),
619 .errstr = "misaligned context access",
621 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
624 "check __sk_buff->hash, offset 0, half store not permitted",
626 BPF_MOV64_IMM(BPF_REG_0, 0),
627 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
628 offsetof(struct __sk_buff, hash)),
631 .errstr = "invalid bpf_context access",
635 "check __sk_buff->tc_index, offset 2, half store not permitted",
637 BPF_MOV64_IMM(BPF_REG_0, 0),
638 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
639 offsetof(struct __sk_buff, tc_index) + 2),
642 .errstr = "invalid bpf_context access",
646 "check skb->hash half load permitted",
648 BPF_MOV64_IMM(BPF_REG_0, 0),
649 #if __BYTE_ORDER == __LITTLE_ENDIAN
650 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
651 offsetof(struct __sk_buff, hash)),
653 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
654 offsetof(struct __sk_buff, hash) + 2),
661 "check skb->hash half load permitted 2",
663 BPF_MOV64_IMM(BPF_REG_0, 0),
664 #if __BYTE_ORDER == __LITTLE_ENDIAN
665 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
666 offsetof(struct __sk_buff, hash) + 2),
668 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
669 offsetof(struct __sk_buff, hash)),
676 "check skb->hash half load not permitted, unaligned 1",
678 BPF_MOV64_IMM(BPF_REG_0, 0),
679 #if __BYTE_ORDER == __LITTLE_ENDIAN
680 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
681 offsetof(struct __sk_buff, hash) + 1),
683 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
684 offsetof(struct __sk_buff, hash) + 3),
688 .errstr = "invalid bpf_context access",
690 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* "check skb->hash half load not permitted, unaligned 3": a 2-byte load at
 * hash + 3 (hash + 1 on big-endian) straddles/misaligns the field and must
 * be rejected.  Fix: the .flags designated initializer was duplicated on
 * two consecutive lines with the same value; initializing the same field
 * twice is redundant and triggers -Woverride-init, so keep a single one.
 */
693 "check skb->hash half load not permitted, unaligned 3",
695 BPF_MOV64_IMM(BPF_REG_0, 0),
696 #if __BYTE_ORDER == __LITTLE_ENDIAN
697 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
698 offsetof(struct __sk_buff, hash) + 3),
700 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
701 offsetof(struct __sk_buff, hash) + 1),
705 .errstr = "invalid bpf_context access",
707 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
711 "check cb access: half, wrong type",
713 BPF_MOV64_IMM(BPF_REG_0, 0),
714 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
715 offsetof(struct __sk_buff, cb[0])),
718 .errstr = "invalid bpf_context access",
720 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
723 "check cb access: word",
725 BPF_MOV64_IMM(BPF_REG_0, 0),
726 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
727 offsetof(struct __sk_buff, cb[0])),
728 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
729 offsetof(struct __sk_buff, cb[1])),
730 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
731 offsetof(struct __sk_buff, cb[2])),
732 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
733 offsetof(struct __sk_buff, cb[3])),
734 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
735 offsetof(struct __sk_buff, cb[4])),
736 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
737 offsetof(struct __sk_buff, cb[0])),
738 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
739 offsetof(struct __sk_buff, cb[1])),
740 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
741 offsetof(struct __sk_buff, cb[2])),
742 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
743 offsetof(struct __sk_buff, cb[3])),
744 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
745 offsetof(struct __sk_buff, cb[4])),
751 "check cb access: word, unaligned 1",
753 BPF_MOV64_IMM(BPF_REG_0, 0),
754 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
755 offsetof(struct __sk_buff, cb[0]) + 2),
758 .errstr = "misaligned context access",
760 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
763 "check cb access: word, unaligned 2",
765 BPF_MOV64_IMM(BPF_REG_0, 0),
766 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
767 offsetof(struct __sk_buff, cb[4]) + 1),
770 .errstr = "misaligned context access",
772 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
775 "check cb access: word, unaligned 3",
777 BPF_MOV64_IMM(BPF_REG_0, 0),
778 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
779 offsetof(struct __sk_buff, cb[4]) + 2),
782 .errstr = "misaligned context access",
784 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
787 "check cb access: word, unaligned 4",
789 BPF_MOV64_IMM(BPF_REG_0, 0),
790 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
791 offsetof(struct __sk_buff, cb[4]) + 3),
794 .errstr = "misaligned context access",
796 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
799 "check cb access: double",
801 BPF_MOV64_IMM(BPF_REG_0, 0),
802 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
803 offsetof(struct __sk_buff, cb[0])),
804 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
805 offsetof(struct __sk_buff, cb[2])),
806 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
807 offsetof(struct __sk_buff, cb[0])),
808 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
809 offsetof(struct __sk_buff, cb[2])),
815 "check cb access: double, unaligned 1",
817 BPF_MOV64_IMM(BPF_REG_0, 0),
818 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
819 offsetof(struct __sk_buff, cb[1])),
822 .errstr = "misaligned context access",
824 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
827 "check cb access: double, unaligned 2",
829 BPF_MOV64_IMM(BPF_REG_0, 0),
830 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
831 offsetof(struct __sk_buff, cb[3])),
834 .errstr = "misaligned context access",
836 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
839 "check cb access: double, oob 1",
841 BPF_MOV64_IMM(BPF_REG_0, 0),
842 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
843 offsetof(struct __sk_buff, cb[4])),
846 .errstr = "invalid bpf_context access",
850 "check cb access: double, oob 2",
852 BPF_MOV64_IMM(BPF_REG_0, 0),
853 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
854 offsetof(struct __sk_buff, cb[4])),
857 .errstr = "invalid bpf_context access",
861 "check __sk_buff->ifindex dw store not permitted",
863 BPF_MOV64_IMM(BPF_REG_0, 0),
864 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
865 offsetof(struct __sk_buff, ifindex)),
868 .errstr = "invalid bpf_context access",
872 "check __sk_buff->ifindex dw load not permitted",
874 BPF_MOV64_IMM(BPF_REG_0, 0),
875 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
876 offsetof(struct __sk_buff, ifindex)),
879 .errstr = "invalid bpf_context access",
883 "check cb access: double, wrong type",
885 BPF_MOV64_IMM(BPF_REG_0, 0),
886 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
887 offsetof(struct __sk_buff, cb[0])),
890 .errstr = "invalid bpf_context access",
892 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
895 "check out of range skb->cb access",
897 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
898 offsetof(struct __sk_buff, cb[0]) + 256),
901 .errstr = "invalid bpf_context access",
904 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
907 "write skb fields from socket prog",
909 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
910 offsetof(struct __sk_buff, cb[4])),
911 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
912 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
913 offsetof(struct __sk_buff, mark)),
914 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
915 offsetof(struct __sk_buff, tc_index)),
916 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
917 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
918 offsetof(struct __sk_buff, cb[0])),
919 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
920 offsetof(struct __sk_buff, cb[2])),
924 .errstr_unpriv = "R1 leaks addr",
925 .result_unpriv = REJECT,
928 "write skb fields from tc_cls_act prog",
930 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
931 offsetof(struct __sk_buff, cb[0])),
932 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
933 offsetof(struct __sk_buff, mark)),
934 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
935 offsetof(struct __sk_buff, tc_index)),
936 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
937 offsetof(struct __sk_buff, tc_index)),
938 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
939 offsetof(struct __sk_buff, cb[3])),
940 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
941 offsetof(struct __sk_buff, tstamp)),
942 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
943 offsetof(struct __sk_buff, tstamp)),
947 .result_unpriv = REJECT,
949 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
952 "check skb->data half load not permitted",
954 BPF_MOV64_IMM(BPF_REG_0, 0),
955 #if __BYTE_ORDER == __LITTLE_ENDIAN
956 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
957 offsetof(struct __sk_buff, data)),
959 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
960 offsetof(struct __sk_buff, data) + 2),
965 .errstr = "invalid bpf_context access",
968 "read gso_segs from CGROUP_SKB",
970 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
971 offsetof(struct __sk_buff, gso_segs)),
972 BPF_MOV64_IMM(BPF_REG_0, 0),
976 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
979 "write gso_segs from CGROUP_SKB",
981 BPF_MOV64_IMM(BPF_REG_0, 0),
982 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
983 offsetof(struct __sk_buff, gso_segs)),
984 BPF_MOV64_IMM(BPF_REG_0, 0),
988 .result_unpriv = REJECT,
989 .errstr = "invalid bpf_context access off=164 size=4",
990 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
993 "read gso_segs from CLS",
995 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
996 offsetof(struct __sk_buff, gso_segs)),
997 BPF_MOV64_IMM(BPF_REG_0, 0),
1001 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1004 "check wire_len is not readable by sockets",
1006 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1007 offsetof(struct __sk_buff, wire_len)),
1010 .errstr = "invalid bpf_context access",
1014 "check wire_len is readable by tc classifier",
1016 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1017 offsetof(struct __sk_buff, wire_len)),
1020 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1024 "check wire_len is not writable by tc classifier",
1026 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1027 offsetof(struct __sk_buff, wire_len)),
1030 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1031 .errstr = "invalid bpf_context access",
1032 .errstr_unpriv = "R1 leaks addr",