/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word (64-bit) */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};
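/* Example (informative, not part of the ABI): a minimal "return 0" program
 * can be hand-encoded with the opcodes above plus BPF_K/BPF_JMP from
 * bpf_common.h:
 *
 *	struct bpf_insn prog[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },					// return r0
 *	};
 */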
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};
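/* Example (informative): an AF_INET /16 lookup key for 192.168.0.0 could be
 * laid out as follows, with the map's key_size set to sizeof(key):
 *
 *	struct {
 *		struct bpf_lpm_trie_key trie_key;
 *		__u8 ipv4[4];
 *	} key = { .trie_key.prefixlen = 16, .ipv4 = { 192, 168, 0, 0 } };
 */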
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_DEVICE,
	BPF_SK_MSG_VERDICT,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of sub-cgroup are executed first, then programs of
 * this cgroup and then programs of parent cgroup.
 * When a child program makes a decision (like picking TCP CA or sock bind),
 * the parent program has a chance to override it.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *      cgrp3 (MULTI prog D) ->
 *        cgrp4 (OVERRIDE prog E) ->
 *          cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)
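/* Example (informative): attaching prog_fd to cgroup_fd (both caller-provided)
 * with BPF_F_ALLOW_MULTI via the BPF_PROG_ATTACH command; the relevant
 * union bpf_attr fields appear further below:
 *
 *	union bpf_attr attr = {
 *		.target_fd	= cgroup_fd,
 *		.attach_bpf_fd	= prog_fd,
 *		.attach_type	= BPF_CGROUP_INET_EGRESS,
 *		.attach_flags	= BPF_F_ALLOW_MULTI,
 *	};
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */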
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD	1

/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
 * offset to another bpf function
 */
#define BPF_PSEUDO_CALL		1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */
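/* Example (informative): inserting a map element only if the key is still
 * free, using the BPF_MAP_*_ELEM fields of union bpf_attr defined below
 * (map_fd, key and value are caller-provided):
 *
 *	union bpf_attr attr = {
 *		.map_fd	= map_fd,
 *		.key	= (__u64)(unsigned long)&key,
 *		.value	= (__u64)(unsigned long)&value,
 *		.flags	= BPF_NOEXIST,
 *	};
 *	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */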
/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE		(1U << 2)

/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

#define BPF_OBJ_NAME_LEN 16U

/* Flags for accessing BPF object */
#define BPF_F_RDONLY		(1U << 3)
#define BPF_F_WRONLY		(1U << 4)

/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID	(1U << 5)

enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
	/* with valid build_id and offset */
	BPF_STACK_BUILD_ID_VALID = 1,
	/* couldn't get build_id, fallback to ip */
	BPF_STACK_BUILD_ID_IP = 2,
};

#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
	__s32		status;
	unsigned char	build_id[BPF_BUILD_ID_SIZE];
	union {
		__u64	offset;
		__u64	ip;
	};
};

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char	map_name[BPF_OBJ_NAME_LEN];
		__u32	map_ifindex;	/* ifindex of netdev to create on */
	};
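	/* Example (informative): creating an LRU hash map with the raw
	 * syscall; unset fields of the anonymous struct stay zero:
	 *
	 *	union bpf_attr attr = {
	 *		.map_type    = BPF_MAP_TYPE_LRU_HASH,
	 *		.key_size    = sizeof(__u32),
	 *		.value_size  = sizeof(__u64),
	 *		.max_entries = 256,
	 *		.map_name    = "example_map",
	 *	};
	 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	 *
	 * The name and sizes are illustrative only.
	 */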
	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
		__u32		prog_flags;
		char		prog_name[BPF_OBJ_NAME_LEN];
		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
	};
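	/* Example (informative): loading the two-instruction "return 0"
	 * program sketched near struct bpf_insn above as a socket filter;
	 * "prog" is the caller's instruction array, name and license are
	 * illustrative:
	 *
	 *	union bpf_attr attr = {
	 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	 *		.insn_cnt  = 2,
	 *		.insns     = (__u64)(unsigned long)prog,
	 *		.license   = (__u64)(unsigned long)"GPL",
	 *		.prog_name = "example",
	 *	};
	 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	 */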
	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
		__u32		file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;
		__u32		data_size_out;
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32	start_id;
			__u32	prog_id;
			__u32	map_id;
		};
		__u32		next_id;
		__u32		open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		__u32		target_fd;	/* container object to query */
		__u32		attach_type;
		__u32		query_flags;
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
	} query;
} __attribute__((aligned(8)));
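/* Example (informative): a minimal userspace wrapper over the syscall, along
 * the lines of what BPF samples and libraries define for themselves:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *			   unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 */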
/* BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(&map, &key)
 *     Return: Map value or NULL
 *
 * int bpf_map_update_elem(&map, &key, &value, flags)
 *     Return: 0 on success or negative error
 *
 * int bpf_map_delete_elem(&map, &key)
 *     Return: 0 on success or negative error
 *
 * int bpf_probe_read(void *dst, int size, void *src)
 *     Return: 0 on success or negative error
 *
 * u64 bpf_ktime_get_ns(void)
 *     Return: current ktime
 *
 * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
 *     Return: length of buffer written or negative error
 *
 * u32 bpf_get_prandom_u32(void)
 *     Return: random value
 *
 * u32 bpf_get_smp_processor_id(void)
 *     Return: SMP processor ID
 * int bpf_skb_store_bytes(skb, offset, from, len, flags)
 *     store bytes into packet
 *     @skb: pointer to skb
 *     @offset: offset within packet from skb->mac_header
 *     @from: pointer where to copy bytes from
 *     @len: number of bytes to store into packet
 *     @flags: bit 0 - if true, recompute skb->csum
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l3_csum_replace(skb, offset, from, to, flags)
 *     recompute IP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where IP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l4_csum_replace(skb, offset, from, to, flags)
 *     recompute TCP/UDP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where TCP/UDP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             bit 4 - is pseudo header
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_tail_call(ctx, prog_array_map, index)
 *     jump into another BPF program
 *     @ctx: context pointer passed to next program
 *     @prog_array_map: pointer to map of type BPF_MAP_TYPE_PROG_ARRAY
 *     @index: 32-bit index inside array that selects specific program to run
 *     Return: 0 on success or negative error
 *
 * int bpf_clone_redirect(skb, ifindex, flags)
 *     redirect to another netdev
 *     @skb: pointer to skb
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * u64 bpf_get_current_pid_tgid(void)
 *     Return: current->tgid << 32 | current->pid
 *
 * u64 bpf_get_current_uid_gid(void)
 *     Return: current_gid << 32 | current_uid
 *
 * int bpf_get_current_comm(char *buf, int size_of_buf)
 *     stores current->comm into buf
 *     Return: 0 on success or negative error
 *
 * u32 bpf_get_cgroup_classid(skb)
 *     retrieve a proc's classid
 *     @skb: pointer to skb
 *     Return: classid if != 0
 * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_vlan_pop(skb)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_get_tunnel_key(skb, key, size, flags)
 * int bpf_skb_set_tunnel_key(skb, key, size, flags)
 *     retrieve or populate tunnel metadata
 *     @skb: pointer to skb
 *     @key: pointer to 'struct bpf_tunnel_key'
 *     @size: size of 'struct bpf_tunnel_key'
 *     @flags: room for future extensions
 *     Return: 0 on success or negative error
 *
 * u64 bpf_perf_event_read(map, flags)
 *     read perf event counter value
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     Return: value of perf event counter read or error code
 *
 * int bpf_redirect(ifindex, flags)
 *     redirect to another netdev
 *     @ifindex: ifindex of the net device
 *     @flags:
 *       cls_bpf:
 *         bit 0 - if set, redirect to ingress instead of egress
 *         other bits - reserved
 *       xdp_bpf:
 *         all bits - reserved
 *     Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
 *             xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * int bpf_redirect_map(map, key, flags)
 *     redirect to endpoint in map
 *     @map: pointer to dev map
 *     @key: index in map to lookup
 *     @flags: --
 *     Return: XDP_REDIRECT on success or XDP_ABORTED on error
 * u32 bpf_get_route_realm(skb)
 *     retrieve a dst's tclassid
 *     @skb: pointer to skb
 *     Return: realm if != 0
 *
 * int bpf_perf_event_output(ctx, map, flags, data, size)
 *     output perf raw sample
 *     @ctx: struct pt_regs*
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     @data: data on stack to be output as raw data
 *     @size: size of data
 *     Return: 0 on success or negative error
 *
 * int bpf_get_stackid(ctx, map, flags)
 *     walk user or kernel stack and return id
 *     @ctx: struct pt_regs*
 *     @map: pointer to stack_trace map
 *     @flags: bits 0-7 - number of stack frames to skip
 *             bit 8 - collect user stack instead of kernel
 *             bit 9 - compare stacks by hash only
 *             bit 10 - if two different stacks hash into the same stackid
 *                      discard old
 *             other bits - reserved
 *     Return: >= 0 stackid on success or negative error
 *
 * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
 *     calculate csum diff
 *     @from: raw from buffer
 *     @from_size: length of from buffer
 *     @to: raw to buffer
 *     @to_size: length of to buffer
 *     @seed: optional seed
 *     Return: csum result or negative error code
 *
 * int bpf_skb_get_tunnel_opt(skb, opt, size)
 *     retrieve tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: option size
 *
 * int bpf_skb_set_tunnel_opt(skb, opt, size)
 *     populate tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: 0 on success or negative error
 * int bpf_skb_change_proto(skb, proto, flags)
 *     Change protocol of the skb. Currently supported are v4 -> v6 and
 *     v6 -> v4 transitions. The helper will also resize the skb; the
 *     eBPF program is expected to fill the new headers via skb_store_bytes
 *     and lX_csum_replace.
 *     @skb: pointer to skb
 *     @proto: new skb->protocol type
 *     @flags: reserved for future use
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_type(skb, type)
 *     Change packet type of skb.
 *     @skb: pointer to skb
 *     @type: new skb->pkt_type type
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_under_cgroup(skb, map, index)
 *     Check cgroup2 membership of skb
 *     @skb: pointer to skb
 *     @map: pointer to bpf_map of type BPF_MAP_TYPE_CGROUP_ARRAY
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 skb failed the cgroup2 descendant test
 *       == 1 skb succeeded the cgroup2 descendant test
 *        < 0 error
 * u32 bpf_get_hash_recalc(skb)
 *     Retrieve and possibly recalculate skb->hash.
 *     @skb: pointer to skb
 *     Return: hash
 *
 * u64 bpf_get_current_task(void)
 *     Returns current task_struct
 *     Return: current
 *
 * int bpf_probe_write_user(void *dst, void *src, int len)
 *     safely attempt to write to a location
 *     @dst: destination address in userspace
 *     @src: source address on stack
 *     @len: number of bytes to copy
 *     Return: 0 on success or negative error
 *
 * int bpf_current_task_under_cgroup(map, index)
 *     Check cgroup2 membership of current task
 *     @map: pointer to bpf_map of type BPF_MAP_TYPE_CGROUP_ARRAY
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 current failed the cgroup2 descendant test
 *       == 1 current succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * int bpf_skb_change_tail(skb, len, flags)
 *     The helper will resize the skb to the given new size, to be used
 *     e.g. with control messages.
 *     @skb: pointer to skb
 *     @len: new skb length
 *     @flags: reserved for future use
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_pull_data(skb, len)
 *     The helper will pull in non-linear data in case the skb is non-linear
 *     and not all of len is part of the linear section. Only needed for
 *     read/write with direct packet access.
 *     @skb: pointer to skb
 *     @len: len to make read/writeable
 *     Return: 0 on success or negative error
 * s64 bpf_csum_update(skb, csum)
 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
 *     @skb: pointer to skb
 *     @csum: csum to add
 *     Return: csum on success or negative error
 *
 * void bpf_set_hash_invalid(skb)
 *     Invalidate current skb->hash.
 *     @skb: pointer to skb
 *
 * int bpf_get_numa_node_id(void)
 *     Return: Id of current NUMA node.
 *
 * int bpf_skb_change_head(skb, len, flags)
 *     Grows headroom of skb and adjusts MAC header offset accordingly.
 *     Will extend/reallocate as required automatically.
 *     May change skb data pointer and will thus invalidate any check
 *     performed for direct packet access.
 *     @skb: pointer to skb
 *     @len: length of header to be pushed in front
 *     @flags: Flags (unused for now)
 *     Return: 0 on success or negative error
 *
 * int bpf_xdp_adjust_head(xdp_md, delta)
 *     Adjust the xdp_md.data by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data
 *     Return: 0 on success or negative on error
 *
 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
 *     Copy a NUL terminated string from an unsafe address. In case the string
 *     length is smaller than size, the target is not padded with further NUL
 *     bytes. In case the string length is larger than size, just size - 1
 *     bytes are copied and the last byte is set to NUL.
 *     @dst: destination address
 *     @size: maximum number of bytes to copy, including the trailing NUL
 *     @unsafe_ptr: unsafe address
 *     Return:
 *       > 0 length of the string including the trailing NUL on success
 *       < 0 error
 * u64 bpf_get_socket_cookie(skb)
 *     Get the cookie for the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: 8-byte non-decreasing number on success or 0 if the socket
 *     field is missing inside sk_buff
 *
 * u32 bpf_get_socket_uid(skb)
 *     Get the owner uid of the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: uid of the socket owner on success or overflowuid if failed.
 *
 * u32 bpf_set_hash(skb, hash)
 *     Set full skb->hash.
 *     @skb: pointer to skb
 *     @hash: hash to set
 *
 * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
 *     Calls setsockopt. Not all opts are available, only those with
 *     integer optvals plus TCP_CONGESTION.
 *     Supported levels: SOL_SOCKET and IPPROTO_TCP
 *     @bpf_socket: pointer to bpf_socket
 *     @level: SOL_SOCKET or IPPROTO_TCP
 *     @optname: option name
 *     @optval: pointer to option value
 *     @optlen: length of optval in bytes
 *     Return: 0 or negative error
 *
 * int bpf_getsockopt(bpf_socket, level, optname, optval, optlen)
 *     Calls getsockopt. Not all opts are available.
 *     Supported levels: IPPROTO_TCP
 *     @bpf_socket: pointer to bpf_socket
 *     @level: IPPROTO_TCP
 *     @optname: option name
 *     @optval: pointer to option value
 *     @optlen: length of optval in bytes
 *     Return: 0 or negative error
 *
 * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
 *     Set callback flags for sock_ops
 *     @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
 *     @flags: flags value
 *     Return: 0 for no error
 *             -EINVAL if there is no full tcp socket
 *             bits in flags that are not supported by current kernel
 * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
 *     Grow or shrink room in sk_buff.
 *     @skb: pointer to skb
 *     @len_diff: (signed) amount of room to grow/shrink
 *     @mode: operation mode (enum bpf_adj_room_mode)
 *     @flags: reserved for future use
 *     Return: 0 on success or negative error code
 *
 * int bpf_sk_redirect_map(map, key, flags)
 *     Redirect skb to a sock in map using key as a lookup key for the
 *     sock in map.
 *     @map: pointer to sockmap
 *     @key: key to lookup sock in map
 *     @flags: reserved for future use
 *     Return: SK_PASS
 *
 * int bpf_sock_map_update(skops, map, key, flags)
 *     @skops: pointer to bpf_sock_ops
 *     @map: pointer to sockmap to update
 *     @key: key to insert/update sock in map
 *     @flags: same flags as map update elem
 *
 * int bpf_xdp_adjust_meta(xdp_md, delta)
 *     Adjust the xdp_md.data_meta by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data_meta
 *     Return: 0 on success or negative on error
 *
 * int bpf_perf_event_read_value(map, flags, buf, buf_size)
 *     read perf event counter value and perf event enabled/running time
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     @buf: buf to fill
 *     @buf_size: size of the buf
 *     Return: 0 on success or negative error code
 *
 * int bpf_perf_prog_read_value(ctx, buf, buf_size)
 *     read perf prog attached perf event counter and enabled/running time
 *     @ctx: pointer to ctx
 *     @buf: buf to fill
 *     @buf_size: size of the buf
 *     Return: 0 on success or negative error code
 *
 * int bpf_override_return(pt_regs, rc)
 *     @pt_regs: pointer to struct pt_regs
 *     @rc: the return value to set
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
	FN(map_lookup_elem),		\
	FN(map_update_elem),		\
	FN(map_delete_elem),		\
	FN(probe_read),			\
	FN(ktime_get_ns),		\
	FN(trace_printk),		\
	FN(get_prandom_u32),		\
	FN(get_smp_processor_id),	\
	FN(skb_store_bytes),		\
	FN(l3_csum_replace),		\
	FN(l4_csum_replace),		\
	FN(tail_call),			\
	FN(clone_redirect),		\
	FN(get_current_pid_tgid),	\
	FN(get_current_uid_gid),	\
	FN(get_current_comm),		\
	FN(get_cgroup_classid),		\
	FN(skb_vlan_push),		\
	FN(skb_vlan_pop),		\
	FN(skb_get_tunnel_key),		\
	FN(skb_set_tunnel_key),		\
	FN(perf_event_read),		\
	FN(redirect),			\
	FN(get_route_realm),		\
	FN(perf_event_output),		\
	FN(skb_load_bytes),		\
	FN(get_stackid),		\
	FN(csum_diff),			\
	FN(skb_get_tunnel_opt),		\
	FN(skb_set_tunnel_opt),		\
	FN(skb_change_proto),		\
	FN(skb_change_type),		\
	FN(skb_under_cgroup),		\
	FN(get_hash_recalc),		\
	FN(get_current_task),		\
	FN(probe_write_user),		\
	FN(current_task_under_cgroup),	\
	FN(skb_change_tail),		\
	FN(skb_pull_data),		\
	FN(csum_update),		\
	FN(set_hash_invalid),		\
	FN(get_numa_node_id),		\
	FN(skb_change_head),		\
	FN(xdp_adjust_head),		\
	FN(probe_read_str),		\
	FN(get_socket_cookie),		\
	FN(get_socket_uid),		\
	FN(set_hash),			\
	FN(setsockopt),			\
	FN(skb_adjust_room),		\
	FN(redirect_map),		\
	FN(sk_redirect_map),		\
	FN(sock_map_update),		\
	FN(xdp_adjust_meta),		\
	FN(perf_event_read_value),	\
	FN(perf_prog_read_value),	\
	FN(getsockopt),			\
	FN(override_return),		\
	FN(sock_ops_cb_flags_set),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
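/* Example (informative): the X-macro expansion above turns each FN(name)
 * into an enumerator BPF_FUNC_name, so the enum reads as if written
 *
 *	enum bpf_func_id {
 *		BPF_FUNC_unspec,
 *		BPF_FUNC_map_lookup_elem,
 *		BPF_FUNC_map_update_elem,
 *		...
 *		__BPF_FUNC_MAX_ID,
 *	};
 *
 * which keeps helper IDs in sync with the doc block above.
 */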
/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
#define BPF_F_INVALIDATE_HASH		(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK		0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR		(1ULL << 4)
#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
#define BPF_F_MARK_ENFORCE		(1ULL << 6)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS			(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6		(1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK		0xffULL
#define BPF_F_USER_STACK		(1ULL << 8)
#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
#define BPF_F_REUSE_STACKID		(1ULL << 10)
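/* Example (informative): to sample user-space stacks while skipping the two
 * innermost frames, bpf_get_stackid() could be passed
 *	flags = (2 & BPF_F_SKIP_FIELD_MASK) | BPF_F_USER_STACK;
 */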
/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)

/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
 * BPF_FUNC_perf_event_read_value flags.
 */
#define BPF_F_INDEX_MASK		0xffffffffULL
#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
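/* Example (informative): for bpf_perf_event_output() on a sk_buff context,
 * an event index (or BPF_F_CURRENT_CPU) in the low 32 bits can be combined
 * with a capture length placed in the BPF_F_CTXLEN_MASK bits:
 *	flags = BPF_F_CURRENT_CPU | ((__u64)ctx_len << 32);
 * where ctx_len is the number of skb bytes to append to the sample.
 */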
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
	BPF_ADJ_ROOM_NET,
};

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];
	__u32 hash;
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
	__u32 napi_id;

	/* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
	/* ... here. */

	__u32 data_meta;
};

struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;
	__u32 tunnel_label;
};

/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counterpart to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * program types.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
	BPF_OK = 0,
	/* 1 reserved */
	BPF_DROP = 2,
	/* 3-6 reserved */
	BPF_REDIRECT = 7,
	/* >127 are reserved for prog type specific return codes */
};

#define XDP_PACKET_HEADROOM 256

/* User return codes for XDP prog type.
 * A valid XDP program must return one of these defined values. All other
 * return codes are reserved for future use. Unknown return codes will
 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
 */
enum xdp_action {
	XDP_ABORTED = 0,
	XDP_DROP,
	XDP_PASS,
	XDP_TX,
	XDP_REDIRECT,
};

/* user accessible metadata for XDP packet hook
 * new fields must be added to the end of this structure
 */
struct xdp_md {
	__u32 data;
	__u32 data_end;
	__u32 data_meta;
	/* Below accesses go through struct xdp_rxq_info */
	__u32 ingress_ifindex; /* rxq->dev->ifindex */
	__u32 rx_queue_index;  /* rxq->queue_index */
};

/* user accessible metadata for SK_MSG packet hook, new fields must
 * be added to the end of this structure
 */
struct sk_msg_md {
	void *data;
	void *data_end;
};
#define BPF_TAG_SIZE 8

struct bpf_prog_info {
	__u32 type;
	__u32 id;
	__u8  tag[BPF_TAG_SIZE];
	__u32 jited_prog_len;
	__u32 xlated_prog_len;
	__aligned_u64 jited_prog_insns;
	__aligned_u64 xlated_prog_insns;
	__u64 load_time;	/* ns since boottime */
	__u32 created_by_uid;
	__u32 nr_map_ids;
	__aligned_u64 map_ids;
	char name[BPF_OBJ_NAME_LEN];
	__u32 ifindex;
	__u64 netns_dev;
	__u64 netns_ino;
} __attribute__((aligned(8)));

struct bpf_map_info {
	__u32 type;
	__u32 id;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 map_flags;
	char  name[BPF_OBJ_NAME_LEN];
	__u32 ifindex;
	__u64 netns_dev;
	__u64 netns_ino;
} __attribute__((aligned(8)));
/* User bpf_sock_ops struct to access socket values and specify request ops
 * and their replies.
 * Some of these fields are in network byte order (big-endian) and may need
 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
 * New fields can only be added at the end of this structure.
 */
struct bpf_sock_ops {
	__u32 op;
	union {
		__u32 args[4];		/* Optionally passed to bpf program */
		__u32 reply;		/* Returned by bpf program	    */
		__u32 replylong[4];	/* Optionally returned by bpf prog  */
	};
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
	__u32 is_fullsock;	/* Some TCP fields are only valid if
				 * there is a full socket. If not, the
				 * fields read as zero.
				 */
	__u32 snd_cwnd;
	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
	__u32 state;
	__u32 rtt_min;
	__u32 snd_ssthresh;
	__u32 rcv_nxt;
	__u32 snd_nxt;
	__u32 snd_una;
	__u32 mss_cache;
	__u32 ecn_flags;
	__u32 rate_delivered;
	__u32 rate_interval_us;
	__u32 packets_out;
	__u32 retrans_out;
	__u32 total_retrans;
	__u32 segs_in;
	__u32 data_segs_in;
	__u32 segs_out;
	__u32 data_segs_out;
	__u32 lost_out;
	__u32 sacked_out;
	__u32 sk_txhash;
	__u64 bytes_received;
	__u64 bytes_acked;
};

/* Definitions for bpf_sock_ops_cb_flags */
#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
#define BPF_SOCK_OPS_ALL_CB_FLAGS	0x7	/* Mask of all currently
						 * supported cb flags
						 */
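/* Example (informative): a sock_ops program interested only in RTO and
 * retransmit events could request them with
 *	bpf_sock_ops_cb_flags_set(skops,
 *				  BPF_SOCK_OPS_RTO_CB_FLAG |
 *				  BPF_SOCK_OPS_RETRANS_CB_FLAG);
 * e.g. from its BPF_SOCK_OPS_TCP_CONNECT_CB handler; "skops" is the
 * program's struct bpf_sock_ops context pointer.
 */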
/* List of known BPF sock_ops operators.
 * New entries can only be added at the end.
 */
enum {
	BPF_SOCK_OPS_VOID,
	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
					 * -1 if default value should be used
					 */
	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
					 * window (in packets) or -1 if default
					 * value should be used
					 */
	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
					 * active connection is initialized
					 */
	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
						 * active connection is
						 * established
						 */
	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
						 * passive connection is
						 * established
						 */
	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
					 * needs ECN
					 */
	BPF_SOCK_OPS_BASE_RTT,		/* Get base RTT. The correct value is
					 * based on the path and may be
					 * dependent on the congestion control
					 * algorithm. In general it indicates
					 * a congestion threshold. RTTs above
					 * this indicate congestion
					 */
	BPF_SOCK_OPS_RTO_CB,		/* Called when an RTO has triggered.
					 * Arg1: value of icsk_retransmits
					 * Arg2: value of icsk_rto
					 * Arg3: whether RTO has expired
					 */
	BPF_SOCK_OPS_RETRANS_CB,	/* Called when skb is retransmitted.
					 * Arg1: sequence number of 1st byte
					 * Arg2: # segments
					 * Arg3: return value of
					 *	 tcp_transmit_skb (0 => success)
					 */
	BPF_SOCK_OPS_STATE_CB,		/* Called when TCP changes state.
					 * Arg1: old_state
					 * Arg2: new_state
					 */
};

/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
 * changes between the TCP and BPF versions. Ideally this should never happen.
 * If it does, we need to add code to convert them before calling
 * the BPF sock_ops function.
 */
enum {
	BPF_TCP_ESTABLISHED = 1,
	BPF_TCP_SYN_SENT,
	BPF_TCP_SYN_RECV,
	BPF_TCP_FIN_WAIT1,
	BPF_TCP_FIN_WAIT2,
	BPF_TCP_TIME_WAIT,
	BPF_TCP_CLOSE,
	BPF_TCP_CLOSE_WAIT,
	BPF_TCP_LAST_ACK,
	BPF_TCP_LISTEN,
	BPF_TCP_CLOSING,	/* Now a valid state */
	BPF_TCP_NEW_SYN_RECV,

	BPF_TCP_MAX_STATES	/* Leave at the end! */
};
#define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
#define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */

struct bpf_perf_event_value {
	__u64 counter;
	__u64 enabled;
	__u64 running;
};

#define BPF_DEVCG_ACC_MKNOD	(1ULL << 0)
#define BPF_DEVCG_ACC_READ	(1ULL << 1)
#define BPF_DEVCG_ACC_WRITE	(1ULL << 2)

#define BPF_DEVCG_DEV_BLOCK	(1ULL << 0)
#define BPF_DEVCG_DEV_CHAR	(1ULL << 1)

struct bpf_cgroup_dev_ctx {
	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
	__u32 access_type;
	__u32 major;
	__u32 minor;
};

#endif /* _UAPI__LINUX_BPF_H__ */