// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "xsk.h"

struct xsk_umem {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
};

struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	__u64 outstanding_tx;
	struct xsk_umem *umem;
	struct xsk_socket_config config;
	int fd;
	int ifindex;
	int prog_fd;
	int xsks_map_fd;
	__u32 queue_id;
	char ifname[IFNAMSIZ];
	bool zc;
};

struct xsk_nl_info {
	bool xdp_prog_attached;
	int ifindex;
	int fd;
};

int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
}

int xsk_socket__fd(const struct xsk_socket *xsk)
{
	return xsk ? xsk->fd : -EINVAL;
}

static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return !(addr & (getpagesize() - 1));
}

static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
		cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
	cfg->flags = usr_cfg->flags;
}
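
/* Example (sketch): callers that want something other than the defaults
 * above fill in their own struct xsk_umem_config and pass it to
 * xsk_umem__create(); passing NULL picks the defaults. The helper name and
 * the doubled fill ring below are purely illustrative.
 */
static inline void xsk_example_umem_config(struct xsk_umem_config *cfg)
{
	cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; /* deeper fill ring */
	cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
	cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
	cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
}
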
static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->libbpf_flags = 0;
		cfg->xdp_flags = 0;
		cfg->bind_flags = 0;
		return 0;
	}

	if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
		return -EINVAL;

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->libbpf_flags = usr_cfg->libbpf_flags;
	cfg->xdp_flags = usr_cfg->xdp_flags;
	cfg->bind_flags = usr_cfg->bind_flags;

	return 0;
}
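
/* Example (sketch): a caller-supplied socket config. Ring sizes must be
 * powers of two. XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (checked above) tells
 * xsk_socket__create() not to load the built-in XDP program, so the
 * application must attach its own. Helper name and values are illustrative.
 */
static inline void xsk_example_socket_config(struct xsk_socket_config *cfg)
{
	cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg->libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
	cfg->xdp_flags = 0;		/* let the kernel pick the attach mode */
	cfg->bind_flags = XDP_COPY;	/* or XDP_ZEROCOPY to require zero-copy */
}
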
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	socklen_t optlen;
	void *map;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	xsk_set_umem_config(&umem->config, usr_config);

	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
	mr.flags = umem->config.flags;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}
	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err) {
		err = -errno;
		goto out_socket;
	}
	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (err) {
		err = -errno;
		goto out_socket;
	}

	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
		   XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_socket;
	}

	umem->fill = fill;
	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
	fill->flags = map + off.fr.flags;
	fill->ring = map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;

	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
		   XDP_UMEM_PGOFF_COMPLETION_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	umem->comp = comp;
	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
	comp->flags = map + off.cr.flags;
	comp->ring = map + off.cr.desc;

	*umem_ptr = umem;
	return 0;

out_mmap:
	munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
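
/* Example (sketch): typical caller-side setup for the function above. The
 * area must be page aligned (see xsk_page_aligned()), so it is allocated
 * with posix_memalign() here. The helper name, the sizes and the decision
 * to use the default config (NULL) are illustrative; error handling is
 * reduced to early returns.
 */
static inline int xsk_example_create_umem(struct xsk_umem **umem,
					  struct xsk_ring_prod *fill,
					  struct xsk_ring_cons *comp)
{
	__u64 size = XSK_RING_PROD__DEFAULT_NUM_DESCS * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *bufs;
	__u32 idx;
	__u32 i;
	int err;

	if (posix_memalign(&bufs, getpagesize(), size))
		return -ENOMEM;

	err = xsk_umem__create(umem, bufs, size, fill, comp, NULL);
	if (err)
		return err;

	/* Give every frame to the kernel on the fill ring so RX can start. */
	if (xsk_ring_prod__reserve(fill, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx) !=
	    XSK_RING_PROD__DEFAULT_NUM_DESCS)
		return -ENOSPC;
	for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++)
		*xsk_ring_prod__fill_addr(fill, idx++) =
			(__u64)i * XSK_UMEM__DEFAULT_FRAME_SIZE;
	xsk_ring_prod__submit(fill, XSK_RING_PROD__DEFAULT_NUM_DESCS);

	return 0;
}
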
struct xsk_umem_config_v1 {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
};

int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xsk_umem_config config;

	memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
	config.flags = 0;

	return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
				       &config);
}

/* Keep the v0.0.2 entry point working for binaries linked against the old
 * config layout (no flags field) and make the v0.0.4 variant the default
 * version of xsk_umem__create.
 */
asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2");
asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4");

static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
	static const int log_buf_size = 16 * 1024;
	char log_buf[log_buf_size];
	int err, prog_fd;

	/* This is the C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     int index = ctx->rx_queue_index;
	 *
	 *     // A set entry here means that the corresponding queue_id
	 *     // has an active AF_XDP socket bound to it.
	 *     if (bpf_map_lookup_elem(&xsks_map, &index))
	 *         return bpf_redirect_map(&xsks_map, index, 0);
	 *
	 *     return XDP_PASS;
	 * }
	 */
	struct bpf_insn prog[] = {
		/* r1 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r1 */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		/* if r1 == 0 goto +5 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r2 = *(u32 *)(r10 - 4) */
		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		BPF_MOV32_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);

	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
				   "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
				   log_buf_size);
	if (prog_fd < 0) {
		pr_warning("BPF log buffer:\n%s", log_buf);
		return prog_fd;
	}

	err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
	if (err) {
		close(prog_fd);
		return err;
	}

	xsk->prog_fd = prog_fd;
	return 0;
}
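
/* For applications that set XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD and attach
 * their own XDP program instead of the byte code above, the program needs
 * an equivalent BPF_MAP_TYPE_XSKMAP, and the application has to put the
 * AF_XDP socket fd into it itself (see xsk_set_bpf_maps() below). A rough
 * sketch of such a program in restricted C, using the classic bpf_map_def
 * map syntax; the max_entries value is illustrative (this file sizes its
 * map from the device's queue count):
 *
 *	struct bpf_map_def SEC("maps") xsks_map = {
 *		.type = BPF_MAP_TYPE_XSKMAP,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(int),
 *		.max_entries = 64,
 *	};
 *
 *	SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
 *	{
 *		int index = ctx->rx_queue_index;
 *
 *		if (bpf_map_lookup_elem(&xsks_map, &index))
 *			return bpf_redirect_map(&xsks_map, index, 0);
 *
 *		return XDP_PASS;
 *	}
 */
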
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr = {};
	int fd, err, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -errno;

	ifr.ifr_data = (void *)&channels;
	memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {
		ret = -errno;
		goto out;
	}

	if (err || channels.max_combined == 0)
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		ret = 1;
	else
		ret = channels.max_combined;

out:
	close(fd);
	return ret;
}

static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
	int max_queues;
	int fd;

	max_queues = xsk_get_max_queues(xsk);
	if (max_queues < 0)
		return max_queues;

	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
				 sizeof(int), sizeof(int), max_queues, 0);
	if (fd < 0)
		return fd;

	xsk->xsks_map_fd = fd;

	return 0;
}

static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
	bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
	close(xsk->xsks_map_fd);
}

static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
{
	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
	__u32 map_len = sizeof(struct bpf_map_info);
	struct bpf_prog_info prog_info = {};
	struct bpf_map_info map_info;
	int fd, err;

	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));

	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);

	xsk->xsks_map_fd = -1;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		fd = bpf_map_get_fd_by_id(map_ids[i]);

		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);

		if (!strcmp(map_info.name, "xsks_map")) {
			xsk->xsks_map_fd = fd;

	if (xsk->xsks_map_fd == -1)
		err = -ENOENT;

static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
	return bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
				   &xsk->fd, 0);
}
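
/* Example (sketch): an application that inhibits the built-in program (see
 * XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD) must do the equivalent of the helper
 * above against its own XSKMAP: the key is the queue id, the value the
 * AF_XDP socket fd. The helper name is illustrative.
 */
static inline int xsk_example_populate_own_map(int my_xsks_map_fd,
					       __u32 queue_id,
					       struct xsk_socket *xsk)
{
	int fd = xsk_socket__fd(xsk);

	return bpf_map_update_elem(my_xsks_map_fd, &queue_id, &fd, 0);
}
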
static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
{
	__u32 prog_id = 0;
	int err;

	err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
				  xsk->config.xdp_flags);

	err = xsk_create_bpf_maps(xsk);

	err = xsk_load_xdp_prog(xsk);

	xsk_delete_bpf_maps(xsk);

	xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
	err = xsk_lookup_bpf_maps(xsk);

	err = xsk_set_bpf_maps(xsk);

	xsk_delete_bpf_maps(xsk);

int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xdp_options opts;
	struct xsk_socket *xsk;
	socklen_t optlen;
	int err;

	if (!umem || !xsk_ptr || !rx || !tx)

	if (umem->refcount) {
		pr_warning("Error: shared umems not supported by libbpf.\n");

	xsk = calloc(1, sizeof(*xsk));

	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);

	xsk->outstanding_tx = 0;
	xsk->queue_id = queue_id;

	xsk->ifindex = if_nametoindex(ifname);

	memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
	xsk->ifname[IFNAMSIZ - 1] = '\0';

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);

	err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
			 &xsk->config.rx_size,
			 sizeof(xsk->config.rx_size));

	err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
			 &xsk->config.tx_size,
			 sizeof(xsk->config.tx_size));

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);

	rx_map = mmap(NULL, off.rx.desc +
		      xsk->config.rx_size * sizeof(struct xdp_desc),
		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		      xsk->fd, XDP_PGOFF_RX_RING);
	if (rx_map == MAP_FAILED) {

	rx->mask = xsk->config.rx_size - 1;
	rx->size = xsk->config.rx_size;
	rx->producer = rx_map + off.rx.producer;
	rx->consumer = rx_map + off.rx.consumer;
	rx->flags = rx_map + off.rx.flags;
	rx->ring = rx_map + off.rx.desc;

	tx_map = mmap(NULL, off.tx.desc +
		      xsk->config.tx_size * sizeof(struct xdp_desc),
		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		      xsk->fd, XDP_PGOFF_TX_RING);
	if (tx_map == MAP_FAILED) {

	tx->mask = xsk->config.tx_size - 1;
	tx->size = xsk->config.tx_size;
	tx->producer = tx_map + off.tx.producer;
	tx->consumer = tx_map + off.tx.consumer;
	tx->flags = tx_map + off.tx.flags;
	tx->ring = tx_map + off.tx.desc;
	tx->cached_cons = xsk->config.tx_size;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = xsk->ifindex;
	sxdp.sxdp_queue_id = xsk->queue_id;
	sxdp.sxdp_flags = xsk->config.bind_flags;

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));

	optlen = sizeof(opts);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_OPTIONS, &opts, &optlen);

	xsk->zc = opts.flags & XDP_OPTIONS_ZEROCOPY;

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = xsk_setup_xdp_prog(xsk);
		if (err)
			goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	return 0;

out_mmap_tx:
	munmap(tx_map, off.tx.desc +
	       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	munmap(rx_map, off.rx.desc +
	       xsk->config.rx_size * sizeof(struct xdp_desc));
out_socket:
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}
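
/* Example (sketch): a minimal caller of xsk_socket__create() above plus the
 * RX side of the ring protocol. Assumes a umem whose fill ring was
 * populated as in the earlier umem sketch; the interface name "eth0",
 * queue 0 and the batch size of 64 are illustrative, and poll()/wakeup
 * handling as well as recycling of consumed frames back onto the fill ring
 * are omitted.
 */
static inline int xsk_example_rx_burst(struct xsk_umem *umem, void *umem_area)
{
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_socket *xsk;
	__u32 idx = 0;
	size_t rcvd;
	__u32 i;
	int err;

	/* NULL config: defaults, built-in XDP program loaded and attached. */
	err = xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
	if (err)
		return err;

	rcvd = xsk_ring_cons__peek(&rx, 64, &idx);
	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&rx, idx + i);
		void *pkt = xsk_umem__get_data(umem_area, desc->addr);

		(void)pkt;	/* parse or copy desc->len bytes here */
	}
	xsk_ring_cons__release(&rx, rcvd);

	xsk_socket__delete(xsk);
	return 0;
}
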
int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen;
	int err;

	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (!err) {
		munmap(umem->fill->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}

	close(umem->fd);
	free(umem);

	return 0;
}

void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	socklen_t optlen;
	int err;

	if (!xsk)
		return;

	if (xsk->prog_fd != -1) {
		xsk_delete_bpf_maps(xsk);
		close(xsk->prog_fd);
	}

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (!err) {
		if (xsk->rx) {
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		}
		if (xsk->tx) {
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
		}
	}

	xsk->umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != xsk->umem->fd)
		close(xsk->fd);
	free(xsk);
}
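
/* Example (sketch): teardown mirrors creation order. Each socket holds a
 * reference on its umem (refcount above), so all sockets created on a umem
 * should be deleted before the umem itself; the registered buffer is only
 * freed by the application afterwards. Names are illustrative.
 */
static inline void xsk_example_teardown(struct xsk_socket *xsk,
					struct xsk_umem *umem, void *umem_area)
{
	xsk_socket__delete(xsk);	/* drops the umem refcount */
	xsk_umem__delete(umem);		/* unmaps fill/comp rings, closes the umem fd */
	free(umem_area);		/* buffer that was registered with XDP_UMEM_REG */
}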