"reference tracking: leak potential reference",
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "Unreleased reference",

"reference tracking: leak potential reference on stack",
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "Unreleased reference",

"reference tracking: leak potential reference on stack 2",
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
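/* overwriting the spilled slot with zero does not release the reference */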
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "Unreleased reference",

"reference tracking: zero potential reference",
BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "Unreleased reference",

"reference tracking: copy and zero potential references",
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "Unreleased reference",

"reference tracking: release reference without check",
/* reference in r0 may be NULL */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "type=sock_or_null expected=sock",

"reference tracking: release reference",
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,

"reference tracking: release reference 2",
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,

"reference tracking: release reference twice",
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
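/* the first release invalidates all copies of the pointer; r6 is no longer a sock */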
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "type=inv expected=sock",

"reference tracking: release reference twice inside branch",
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "type=inv expected=sock",

"reference tracking: alloc, check, free in one subbranch",
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
            offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
            offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
/* if (offsetof(skb, mark) > data_len) exit; */
BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
            offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
/* Leak reference in R0 */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "Unreleased reference",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,

"reference tracking: alloc, check, free in both subbranches",
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
            offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
            offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
/* if (offsetof(skb, mark) > data_len) exit; */
BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
            offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,

"reference tracking in call: free reference in subprog",
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
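/* subprog 1 */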
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,

"reference tracking in call: free reference in subprog and outside",
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
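/* subprog 1 */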
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "type=inv expected=sock",

"reference tracking in call: alloc & leak reference in subprog",
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, 0),
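/* subprog 1 */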
BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
/* spill unchecked sk_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "Unreleased reference",

"reference tracking in call: alloc in subprog, release outside",
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
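/* subprog 1 */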
BPF_EXIT_INSN(), /* return sk */
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.retval = POINTER_VALUE,

"reference tracking in call: sk_ptr leak into caller stack",
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
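/* subprog 1 */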
BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
/* spill unchecked sk_ptr into stack of caller */
BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "Unreleased reference",

"reference tracking in call: sk_ptr spill into caller stack",
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
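/* subprog 1 */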
BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
/* spill unchecked sk_ptr into stack of caller */
BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* now the sk_ptr is verified, free the reference */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,

"reference tracking: allow LD_ABS",
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,

"reference tracking: forbid LD_ABS while holding reference",
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",

"reference tracking: allow LD_IND",
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_MOV64_IMM(BPF_REG_7, 1),
BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,

"reference tracking: forbid LD_IND while holding reference",
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_7, 1),
BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",

"reference tracking: check reference or tail call",
BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
/* if (sk) bpf_sk_release() */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
/* bpf_tail_call() */
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.fixup_prog1 = { 17 },
.prog_type = BPF_PROG_TYPE_SCHED_CLS,

"reference tracking: release reference then tail call",
BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
/* if (sk) bpf_sk_release() */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
/* bpf_tail_call() */
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
.fixup_prog1 = { 18 },
.prog_type = BPF_PROG_TYPE_SCHED_CLS,

"reference tracking: leak possible reference over tail call",
BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
/* Look up socket and store in REG_6 */
/* bpf_tail_call() */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
/* if (sk) bpf_sk_release() */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.fixup_prog1 = { 16 },
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "tail_call would lead to reference leak",

"reference tracking: leak checked reference over tail call",
BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
/* Look up socket and store in REG_6 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
/* if (!sk) goto end */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
/* bpf_tail_call() */
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.fixup_prog1 = { 17 },
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "tail_call would lead to reference leak",

"reference tracking: mangle and release sock_or_null",
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
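/* pointer mangled before the NULL check, so r1 is still sock_or_null */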
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R1 pointer arithmetic on sock_or_null prohibited",

"reference tracking: mangle and release sock",
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
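/* pointer mangled after the NULL check; arithmetic on a plain sock is also rejected */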
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R1 pointer arithmetic on sock prohibited",

"reference tracking: access member",
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,

"reference tracking: write to member",
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_LD_IMM64(BPF_REG_2, 42),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
            offsetof(struct bpf_sock, mark)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_LD_IMM64(BPF_REG_0, 0),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "cannot write into sock",

"reference tracking: invalid 64-bit access of member",
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid sock access off=0 size=8",

"reference tracking: access after release",
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
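/* the reference held in r1 was just released; the load below must be rejected */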
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "!read_ok",

"reference tracking: direct access for lookup",
/* Check that the packet is at least 64B long */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
            offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
            offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
/* sk = sk_lookup_tcp(ctx, skb->data, ...) */
BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,