1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2018 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 #include <linux/bpf.h>
19 #include <linux/if_link.h>
20 #include <linux/if_xdp.h>
21 #include <linux/if_ether.h>
28 #include <net/ethernet.h>
29 #include <sys/resource.h>
30 #include <sys/socket.h>
36 #include <sys/types.h>
/*
 * Tunables: umem frame-pool geometry and ring sizes (all ring sizes are
 * powers of two so `index & mask` indexing works), plus the command-line
 * option state filled in by parse_command_line().
 * NOTE(review): the embedded original line numbers jump (57..85), so some
 * declarations (e.g. BATCH_SIZE, MAX_SOCKS, opt_poll, opt_queue, the
 * benchmark_type enum) are not visible in this view.
 */
57 #define NUM_FRAMES 131072
58 #define FRAME_HEADROOM 0
59 #define FRAME_SIZE 2048
60 #define NUM_DESCS 1024
63 #define FQ_NUM_DESCS 1024
64 #define CQ_NUM_DESCS 1024
/* Set non-zero to hex-dump every packet handled (see hex_dump() callers). */
66 #define DEBUG_HEXDUMP 0
/* Timestamp (ns, CLOCK_MONOTONIC) of the previous stats dump. */
70 static unsigned long prev_time;
/* Options parsed from argv. */
78 static enum benchmark_type opt_bench = BENCH_RXDROP;
79 static u32 opt_xdp_flags;
80 static const char *opt_if = "";
81 static int opt_ifindex;
84 static int opt_shared_packet_buffer;
85 static int opt_interval = 1;
/*
 * Userspace views of the AF_XDP rings and per-socket state.
 * NOTE(review): struct bodies are truncated in this view (original
 * numbering jumps 87->92, 96->98, 107->112 etc.) -- fields such as
 * cached_prod/cached_cons/mask/size and the sfd/outstanding_tx members
 * referenced by the helpers below are not visible here.
 */
87 struct xdp_umem_uqueue {
92 struct xdp_umem_ring *ring;
/* umem: array of NUM_FRAMES fixed-size frames plus fill/completion rings. */
96 char (*frames)[FRAME_SIZE];
97 struct xdp_umem_uqueue fq;
98 struct xdp_umem_uqueue cq;
107 struct xdp_rxtx_ring *ring;
/* Per-socket state: RX/TX descriptor rings and the (possibly shared) umem. */
111 struct xdp_uqueue rx;
112 struct xdp_uqueue tx;
114 struct xdp_umem *umem;
/* Packet counters read by dump_stats(); prev_* snapshot the last interval. */
116 unsigned long rx_npkts;
117 unsigned long tx_npkts;
118 unsigned long prev_rx_npkts;
119 unsigned long prev_tx_npkts;
/* All configured sockets; num_socks is the used prefix of xsks[]. */
123 static int num_socks;
124 struct xdpsock *xsks[MAX_SOCKS];
/*
 * Monotonic clock in nanoseconds; used for the stats interval math in
 * dump_stats(). (Opening brace and `struct timespec ts` declaration are
 * elided in this view.)
 */
126 static unsigned long get_nsecs(void)
130 clock_gettime(CLOCK_MONOTONIC, &ts);
131 return ts.tv_sec * 1000000000UL + ts.tv_nsec;
134 static void dump_stats(void);
/*
 * lassert(): assert-and-exit that also reports errno. Kept as a macro so
 * __FILE__/__func__/__LINE__ refer to the call site. (The surrounding
 * `do { if (!(expr)) { ... } } while (0)` scaffolding is elided in this
 * view -- the original numbering jumps 136->139 and 142->144.)
 */
136 #define lassert(expr) \
139 fprintf(stderr, "%s:%s:%i: Assertion failed: " \
140 #expr ": errno: %d/\"%s\"\n", \
141 __FILE__, __func__, __LINE__, \
142 errno, strerror(errno)); \
144 exit(EXIT_FAILURE); \
/*
 * u_smp_rmb/u_smp_wmb are pure compiler barriers here -- presumably this
 * relies on the strong memory ordering of the target CPU (x86-style);
 * TODO(review): confirm before building for weakly-ordered architectures.
 */
148 #define barrier() __asm__ __volatile__("": : :"memory")
149 #define u_smp_rmb() barrier()
150 #define u_smp_wmb() barrier()
151 #define likely(x) __builtin_expect(!!(x), 1)
152 #define unlikely(x) __builtin_expect(!!(x), 0)
/*
 * Template frame for the txonly benchmark: Ethernet header (ethertype
 * 0x0800 = IPv4), IPv4 header (version/IHL 0x45, protocol 0x11 = UDP)
 * and a small UDP payload. gen_eth_frame() copies it into every umem
 * frame; users take sizeof(pkt_data) - 1 to drop the trailing NUL.
 */
154 static const char pkt_data[] =
155 "\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
156 "\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
157 "\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
158 "\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";
/*
 * Free slots in a umem (fill/completion) ring, wanting at least @nb.
 * Checks the locally cached producer/consumer first and only re-reads
 * the shared consumer pointer when the cached view is insufficient,
 * minimizing cross-core traffic. (Early `return free_entries;` and the
 * closing brace are elided in this view.)
 */
160 static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
162 u32 free_entries = q->size - (q->cached_prod - q->cached_cons);
164 if (free_entries >= nb)
167 /* Refresh the local tail pointer */
168 q->cached_cons = q->ring->ptrs.consumer;
170 return q->size - (q->cached_prod - q->cached_cons);
/*
 * Free slots in an RX/TX descriptor ring. Note cached_cons is kept
 * offset by q->size so that `free = cached_cons - cached_prod` works
 * directly; the refresh below re-applies that offset.
 */
173 static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
175 u32 free_entries = q->cached_cons - q->cached_prod;
177 if (free_entries >= ndescs)
180 /* Refresh the local tail pointer */
181 q->cached_cons = q->ring->ptrs.consumer + q->size;
182 return q->cached_cons - q->cached_prod;
/*
 * Entries available to consume from a umem ring, capped at @nb.
 * (The `if (entries == 0)` guard around the refresh is elided in this
 * view -- original numbering jumps 187->190.)
 */
185 static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
187 u32 entries = q->cached_prod - q->cached_cons;
/* Cached view exhausted: re-read the shared producer pointer. */
190 q->cached_prod = q->ring->ptrs.producer;
191 entries = q->cached_prod - q->cached_cons;
194 return (entries > nb) ? nb : entries;
/*
 * Entries available to consume from an RX/TX ring, capped at @ndescs.
 * Mirrors umem_nb_avail(); the empty-cache guard is likewise elided in
 * this view.
 */
197 static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
199 u32 entries = q->cached_prod - q->cached_cons;
202 q->cached_prod = q->ring->ptrs.producer;
203 entries = q->cached_prod - q->cached_cons;
206 return (entries > ndescs) ? ndescs : entries;
/*
 * Post @nb frame indices, taken from the RX descriptors in @d, back
 * onto the umem fill ring and publish the producer pointer.
 * NOTE(review): the error-return body, the write memory barrier before
 * publishing, and the `return 0` are elided in this view (numbering
 * jumps 215->218, 221->226).
 */
209 static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
215 if (umem_nb_free(fq, nb) < nb)
218 for (i = 0; i < nb; i++) {
219 u32 idx = fq->cached_prod++ & fq->mask;
221 fq->ring->desc[idx] = d[i].idx;
226 fq->ring->ptrs.producer = fq->cached_prod;
/*
 * Same as umem_fill_to_kernel_ex() but takes raw u32 frame indices
 * (used when recycling TX completions); posts @nb of them onto the
 * fill ring and publishes the producer pointer. Error-return and
 * barrier lines are elided in this view.
 */
231 static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u32 *d,
236 if (umem_nb_free(fq, nb) < nb)
239 for (i = 0; i < nb; i++) {
240 u32 idx = fq->cached_prod++ & fq->mask;
242 fq->ring->desc[idx] = d[i];
247 fq->ring->ptrs.producer = fq->cached_prod;
/*
 * Drain up to @nb completed frame indices from the completion ring into
 * @d and publish the consumer pointer; 'entries' (the count drained) is
 * presumably the return value -- the `return entries;` line is elided
 * in this view.
 */
252 static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
255 u32 idx, i, entries = umem_nb_avail(cq, nb);
259 for (i = 0; i < entries; i++) {
260 idx = cq->cached_cons++ & cq->mask;
261 d[i] = cq->ring->desc[idx];
267 cq->ring->ptrs.consumer = cq->cached_cons;
/*
 * Pointer to payload byte @off within umem frame @idx; frame index is
 * bounds-checked against NUM_FRAMES via lassert().
 */
273 static inline void *xq_get_data(struct xdpsock *xsk, __u32 idx, __u32 off)
275 lassert(idx < NUM_FRAMES);
276 return &xsk->umem->frames[idx][off];
/*
 * Copy @ndescs descriptors from @descs into the TX ring and publish the
 * producer pointer (used by l2fwd to retransmit received frames).
 * NOTE(review): the error-return body, the write barrier before the
 * producer store, and `return 0` are elided in this view.
 */
279 static inline int xq_enq(struct xdp_uqueue *uq,
280 const struct xdp_desc *descs,
283 struct xdp_rxtx_ring *r = uq->ring;
286 if (xq_nb_free(uq, ndescs) < ndescs)
289 for (i = 0; i < ndescs; i++) {
290 u32 idx = uq->cached_prod++ & uq->mask;
292 r->desc[idx].idx = descs[i].idx;
293 r->desc[idx].len = descs[i].len;
294 r->desc[idx].offset = descs[i].offset;
299 r->ptrs.producer = uq->cached_prod;
/*
 * Fill @ndescs TX descriptors with the template-packet length and zero
 * offset, then publish the producer pointer (txonly benchmark).
 * NOTE(review): the loop-local `u32 idx` shadows the `__u32 idx`
 * parameter, so `q->desc[idx].idx = idx + i` sets the frame index from
 * the masked ring-slot position rather than the caller-supplied start
 * index -- confirm against the full source whether this is intended
 * (tx_only() appears to always pass idx == 0).
 */
303 static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
304 __u32 idx, unsigned int ndescs)
306 struct xdp_rxtx_ring *q = uq->ring;
309 if (xq_nb_free(uq, ndescs) < ndescs)
312 for (i = 0; i < ndescs; i++) {
313 u32 idx = uq->cached_prod++ & uq->mask;
315 q->desc[idx].idx = idx + i;
316 q->desc[idx].len = sizeof(pkt_data) - 1;
317 q->desc[idx].offset = 0;
322 q->ptrs.producer = uq->cached_prod;
/*
 * Dequeue up to @ndescs descriptors from the RX ring into @descs and
 * publish the consumer pointer; 'entries' (the count dequeued) is
 * presumably returned -- the `return entries;` line is elided in this
 * view, as are the read barrier and variable declarations.
 */
326 static inline int xq_deq(struct xdp_uqueue *uq,
327 struct xdp_desc *descs,
330 struct xdp_rxtx_ring *r = uq->ring;
334 entries = xq_nb_avail(uq, ndescs);
338 for (i = 0; i < entries; i++) {
339 idx = uq->cached_cons++ & uq->mask;
340 descs[i] = r->desc[idx];
346 r->ptrs.consumer = uq->cached_cons;
/*
 * Swap the Ethernet source and destination MACs of @data in place
 * (l2fwd path).
 * NOTE(review): `ð->` below is HTML-entity mojibake -- "&eth;"
 * was decoded into the single character; restore `&eth->ether_shost`
 * and `&eth->ether_dhost` when repairing the file. The `tmp = *src`
 * save and `*dst = tmp` write-back lines are also elided in this view,
 * leaving only the middle assignment of the three-step swap visible.
 */
352 static void swap_mac_addresses(void *data)
354 struct ether_header *eth = (struct ether_header *)data;
355 struct ether_addr *src_addr = (struct ether_addr *)ð->ether_shost;
356 struct ether_addr *dst_addr = (struct ether_addr *)ð->ether_dhost;
357 struct ether_addr tmp;
360 *src_addr = *dst_addr;
/*
 * Debug helper: print @length bytes at @pkt as hex, 32 bytes per line,
 * each line prefixed with @prefix and followed by an ASCII column where
 * non-printables are shown as '.' (0x2E). Declarations of `i`/`c`,
 * padding printfs, and the closing braces are elided in this view.
 */
365 static void hex_dump(void *pkt, size_t length, const char *prefix)
368 const unsigned char *address = (unsigned char *)pkt;
369 const unsigned char *line = address;
370 size_t line_size = 32;
373 printf("length = %zu\n", length);
374 printf("%s | ", prefix);
375 while (length-- > 0) {
376 printf("%02X ", *address++);
/* End of a 32-byte row, or a final partial row: pad, then ASCII column. */
377 if (!(++i % line_size) || (length == 0 && i % line_size)) {
379 while (i++ % line_size)
382 printf(" | "); /* right close */
383 while (line < address) {
385 printf("%c", (c < 33 || c == 255) ? 0x2E : c);
389 printf("%s | ", prefix);
/*
 * Copy the template packet into @frame and return its length
 * (sizeof - 1 drops pkt_data's trailing NUL terminator).
 */
396 static size_t gen_eth_frame(char *frame)
398 memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
399 return sizeof(pkt_data) - 1;
/*
 * Create and register a umem on socket @sfd: allocate a page-aligned
 * NUM_FRAMES * FRAME_SIZE buffer, register it with XDP_UMEM_REG, size
 * the fill/completion rings via setsockopt, and mmap both rings.
 * For the txonly benchmark every frame is pre-filled with the template
 * packet so the TX hot path never touches payload data.
 * NOTE(review): the calloc() NULL-check, `void *bufs` declaration,
 * umem->fd assignment and `return umem` are elided in this view.
 */
402 static struct xdp_umem *xdp_umem_configure(int sfd)
404 int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
405 struct xdp_umem_reg mr;
406 struct xdp_umem *umem;
409 umem = calloc(1, sizeof(*umem));
412 lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
413 NUM_FRAMES * FRAME_SIZE) == 0);
415 mr.addr = (__u64)bufs;
416 mr.len = NUM_FRAMES * FRAME_SIZE;
417 mr.frame_size = FRAME_SIZE;
418 mr.frame_headroom = FRAME_HEADROOM;
420 lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
421 lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
423 lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
/* Map the fill ring: shared ptrs header + FQ_NUM_DESCS u32 descriptors. */
426 umem->fq.ring = mmap(0, sizeof(struct xdp_umem_ring) +
427 FQ_NUM_DESCS * sizeof(u32),
428 PROT_READ | PROT_WRITE,
429 MAP_SHARED | MAP_POPULATE, sfd,
430 XDP_UMEM_PGOFF_FILL_RING);
431 lassert(umem->fq.ring != MAP_FAILED);
433 umem->fq.mask = FQ_NUM_DESCS - 1;
434 umem->fq.size = FQ_NUM_DESCS;
/* Map the completion ring the same way at its own page offset. */
436 umem->cq.ring = mmap(0, sizeof(struct xdp_umem_ring) +
437 CQ_NUM_DESCS * sizeof(u32),
438 PROT_READ | PROT_WRITE,
439 MAP_SHARED | MAP_POPULATE, sfd,
440 XDP_UMEM_PGOFF_COMPLETION_RING);
441 lassert(umem->cq.ring != MAP_FAILED);
443 umem->cq.mask = CQ_NUM_DESCS - 1;
444 umem->cq.size = CQ_NUM_DESCS;
446 umem->frames = (char (*)[FRAME_SIZE])bufs;
/* txonly never RXes, so pre-generate the payload for every frame once. */
449 if (opt_bench == BENCH_TXONLY) {
452 for (i = 0; i < NUM_FRAMES; i++)
453 (void)gen_eth_frame(&umem->frames[i][0]);
/*
 * Create one AF_XDP socket: when @umem is NULL, register a fresh umem
 * on this socket and populate its fill ring; when non-NULL, share the
 * existing umem via the XDP_SHARED_UMEM bind flag. Sizes and mmaps the
 * RX/TX rings, then binds to opt_ifindex/opt_queue.
 * NOTE(review): the `if (!umem)` / `else` branch structure around the
 * umem setup and fill-ring population, the sfd/xsk NULL checks, and the
 * `return xsk` are elided in this view (original numbering jumps).
 */
459 static struct xdpsock *xsk_configure(struct xdp_umem *umem)
461 struct sockaddr_xdp sxdp = {};
462 int sfd, ndescs = NUM_DESCS;
467 sfd = socket(PF_XDP, SOCK_RAW, 0);
470 xsk = calloc(1, sizeof(*xsk));
474 xsk->outstanding_tx = 0;
/* No shared umem supplied: register one on this socket. */
478 xsk->umem = xdp_umem_configure(sfd);
483 lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
484 &ndescs, sizeof(int)) == 0);
485 lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
486 &ndescs, sizeof(int)) == 0);
489 xsk->rx.ring = mmap(NULL,
490 sizeof(struct xdp_ring) +
491 NUM_DESCS * sizeof(struct xdp_desc),
492 PROT_READ | PROT_WRITE,
493 MAP_SHARED | MAP_POPULATE, sfd,
495 lassert(xsk->rx.ring != MAP_FAILED);
/* Seed the fill ring with the first NUM_DESCS/2 frame indices. */
498 for (i = 0; i < NUM_DESCS / 2; i++)
499 lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
504 xsk->tx.ring = mmap(NULL,
505 sizeof(struct xdp_ring) +
506 NUM_DESCS * sizeof(struct xdp_desc),
507 PROT_READ | PROT_WRITE,
508 MAP_SHARED | MAP_POPULATE, sfd,
510 lassert(xsk->tx.ring != MAP_FAILED);
512 xsk->rx.mask = NUM_DESCS - 1;
513 xsk->rx.size = NUM_DESCS;
515 xsk->tx.mask = NUM_DESCS - 1;
516 xsk->tx.size = NUM_DESCS;
518 sxdp.sxdp_family = PF_XDP;
519 sxdp.sxdp_ifindex = opt_ifindex;
520 sxdp.sxdp_queue_id = opt_queue;
/* Shared-umem case: bind against the first socket's umem fd. */
522 sxdp.sxdp_flags = XDP_SHARED_UMEM;
523 sxdp.sxdp_shared_umem_fd = umem->fd;
526 lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);
/*
 * Print "ifname:queue benchname" plus the XDP attach mode; when
 * @running, append "running...". The l2fwd string assignment and the
 * mode-printing printf bodies are elided in this view.
 */
531 static void print_benchmark(bool running)
533 const char *bench_str = "INVALID";
535 if (opt_bench == BENCH_RXDROP)
536 bench_str = "rxdrop";
537 else if (opt_bench == BENCH_TXONLY)
538 bench_str = "txonly";
539 else if (opt_bench == BENCH_L2FWD)
542 printf("%s:%d %s ", opt_if, opt_queue, bench_str);
543 if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
545 else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
554 printf("running...");
/*
 * Periodic stats dump: for each socket, compute rx/tx packets-per-second
 * over the elapsed interval and snapshot the counters for next time.
 * @dt is in nanoseconds; the `* 1000000000. / dt` scaling continuations
 * of the rx_pps/tx_pps expressions are elided in this view, as is the
 * prev_time update.
 */
559 static void dump_stats(void)
561 unsigned long now = get_nsecs();
562 long dt = now - prev_time;
567 for (i = 0; i < num_socks; i++) {
568 char *fmt = "%-15s %'-11.0f %'-11lu\n";
569 double rx_pps, tx_pps;
571 rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
573 tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
576 printf("\n sock%d@", i);
577 print_benchmark(false);
580 printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
582 printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
583 printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);
585 xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
586 xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
/*
 * Stats thread entry point -- body entirely elided in this view;
 * presumably sleeps opt_interval seconds and calls dump_stats() in a
 * loop (TODO confirm against the full source).
 */
590 static void *poller(void *arg)
/*
 * SIGINT/SIGTERM/SIGABRT handler: detach the XDP program from the
 * interface (fd -1 removes it) before exiting; the stats dump and
 * exit() lines are elided in this view.
 */
601 static void int_exit(int sig)
605 bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
/*
 * getopt_long() option table, mirrored by the "rtli:q:psSNn:" short
 * string in parse_command_line(); the terminating {0, 0, 0, 0} sentinel
 * entry is elided in this view.
 */
609 static struct option long_options[] = {
610 {"rxdrop", no_argument, 0, 'r'},
611 {"txonly", no_argument, 0, 't'},
612 {"l2fwd", no_argument, 0, 'l'},
613 {"interface", required_argument, 0, 'i'},
614 {"queue", required_argument, 0, 'q'},
615 {"poll", no_argument, 0, 'p'},
616 {"shared-buffer", no_argument, 0, 's'},
617 {"xdp-skb", no_argument, 0, 'S'},
618 {"xdp-native", no_argument, 0, 'N'},
619 {"interval", required_argument, 0, 'n'},
/*
 * Print usage text to stderr; callers exit afterwards (the exit() line
 * is elided in this view). Note the help text's own typos ("skb-mod",
 * "Enfore") are part of the runtime string and left untouched here.
 */
623 static void usage(const char *prog)
626 " Usage: %s [OPTIONS]\n"
628 " -r, --rxdrop Discard all incoming packets (default)\n"
629 " -t, --txonly Only send packets\n"
630 " -l, --l2fwd MAC swap L2 forwarding\n"
631 " -i, --interface=n Run on interface n\n"
632 " -q, --queue=n Use queue n (default 0)\n"
633 " -p, --poll Use poll syscall\n"
634 " -s, --shared-buffer Use shared packet buffer\n"
635 " -S, --xdp-skb=n Use XDP skb-mod\n"
636 " -N, --xdp-native=n Enfore XDP native mode\n"
637 " -n, --interval=n Specify statistics update interval (default 1 sec).\n"
639 fprintf(stderr, str, prog);
/*
 * Parse argv with getopt_long() into the opt_* globals, then resolve
 * opt_if to opt_ifindex (0 means the interface does not exist).
 * The switch/case scaffolding, 'i' and 'p' cases, and loop structure
 * are elided in this view.
 * NOTE(review): atoi() silently accepts garbage for -q/-n; strtol with
 * validation would be stricter.
 */
643 static void parse_command_line(int argc, char **argv)
650 c = getopt_long(argc, argv, "rtli:q:psSNn:", long_options,
657 opt_bench = BENCH_RXDROP;
660 opt_bench = BENCH_TXONLY;
663 opt_bench = BENCH_L2FWD;
669 opt_queue = atoi(optarg);
672 opt_shared_packet_buffer = 1;
678 opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
681 opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
684 opt_interval = atoi(optarg);
687 usage(basename(argv[0]));
691 opt_ifindex = if_nametoindex(opt_if);
693 fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
695 usage(basename(argv[0]));
/*
 * Nudge the kernel to start transmitting via a zero-length
 * sendto(MSG_DONTWAIT); ENOBUFS/EAGAIN are expected under load and
 * treated as success (the early-return body and the failure lassert
 * are elided in this view).
 */
699 static void kick_tx(int fd)
703 ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
704 if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN)
/*
 * l2fwd TX reclaim: kick the kernel, reap up to BATCH_SIZE completed
 * frame indices from the completion ring, and recycle them onto the
 * fill ring so the RX side can reuse the frames. The kick_tx() call and
 * the `if (rcvd > 0)` guard are elided in this view.
 */
709 static inline void complete_tx_l2fwd(struct xdpsock *xsk)
711 u32 descs[BATCH_SIZE];
715 if (!xsk->outstanding_tx)
719 ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
722 /* re-add completed Tx buffers */
723 rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
725 umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
726 xsk->outstanding_tx -= rcvd;
727 xsk->tx_npkts += rcvd;
/*
 * txonly TX reclaim: reap up to BATCH_SIZE completions and update the
 * counters. Unlike complete_tx_l2fwd() the frame indices are NOT pushed
 * to the fill ring -- txonly never receives. kick_tx() and the
 * `if (rcvd > 0)` guard are elided in this view.
 */
731 static inline void complete_tx_only(struct xdpsock *xsk)
733 u32 descs[BATCH_SIZE];
736 if (!xsk->outstanding_tx)
741 rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
743 xsk->outstanding_tx -= rcvd;
744 xsk->tx_npkts += rcvd;
/*
 * rxdrop benchmark step: dequeue a batch from the RX ring, optionally
 * hex-dump each frame (DEBUG_HEXDUMP block), count them, and recycle
 * the frame indices straight back onto the fill ring. The early return
 * on empty batch and the #if DEBUG_HEXDUMP guards are elided in this
 * view.
 */
748 static void rx_drop(struct xdpsock *xsk)
750 struct xdp_desc descs[BATCH_SIZE];
751 unsigned int rcvd, i;
753 rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
757 for (i = 0; i < rcvd; i++) {
758 u32 idx = descs[i].idx;
760 lassert(idx < NUM_FRAMES);
/* Debug-only: dump packet bytes tagged with the frame index. */
765 pkt = xq_get_data(xsk, idx, descs[i].offset);
766 sprintf(buf, "idx=%d", idx);
767 hex_dump(pkt, descs[i].len, buf);
771 xsk->rx_npkts += rcvd;
773 umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
/*
 * rxdrop main loop: poll (or spin, depending on opt_poll -- the elided
 * branch) and call rx_drop() for every socket.
 * NOTE(review): nfds stays 1 while the setup loop fills
 * fds[0..num_socks-1], so poll() only waits on fds[0]; also timeout is
 * redundantly assigned inside the loop. Verify intent against the full
 * source before changing.
 */
776 static void rx_drop_all(void)
778 struct pollfd fds[MAX_SOCKS + 1];
779 int i, ret, timeout, nfds = 1;
781 memset(fds, 0, sizeof(fds));
783 for (i = 0; i < num_socks; i++) {
784 fds[i].fd = xsks[i]->sfd;
785 fds[i].events = POLLIN;
786 timeout = 1000; /* 1sn */
791 ret = poll(fds, nfds, timeout);
796 for (i = 0; i < num_socks; i++)
/*
 * txonly main loop: optionally poll for POLLOUT, then whenever the TX
 * ring has room for a full batch, enqueue BATCH_SIZE template-packet
 * descriptors, advance idx, and reap completions. The for(;;) loop
 * header, opt_poll branch, kick, and idx advance lines are elided in
 * this view.
 */
801 static void tx_only(struct xdpsock *xsk)
803 int timeout, ret, nfds = 1;
804 struct pollfd fds[nfds + 1];
805 unsigned int idx = 0;
807 memset(fds, 0, sizeof(fds));
808 fds[0].fd = xsk->sfd;
809 fds[0].events = POLLOUT;
810 timeout = 1000; /* 1sn */
814 ret = poll(fds, nfds, timeout);
/* Only transmit when our socket is actually writable. */
818 if (fds[0].fd != xsk->sfd ||
819 !(fds[0].revents & POLLOUT))
823 if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
824 lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);
826 xsk->outstanding_tx += BATCH_SIZE;
831 complete_tx_only(xsk);
/*
 * l2fwd benchmark step: reap outstanding TX completions, dequeue an RX
 * batch, swap source/destination MACs in place, then re-enqueue the
 * same descriptors on the TX ring (zero-copy forward within the shared
 * umem). The empty-batch early return, DEBUG_HEXDUMP guards, kick_tx()
 * and the retry loop around xq_enq() are elided in this view.
 */
835 static void l2fwd(struct xdpsock *xsk)
838 struct xdp_desc descs[BATCH_SIZE];
839 unsigned int rcvd, i;
843 complete_tx_l2fwd(xsk);
845 rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
850 for (i = 0; i < rcvd; i++) {
851 char *pkt = xq_get_data(xsk, descs[i].idx,
854 swap_mac_addresses(pkt);
/* Debug-only hex dump of the frame about to be retransmitted. */
857 u32 idx = descs[i].idx;
859 sprintf(buf, "idx=%d", idx);
860 hex_dump(pkt, descs[i].len, buf);
864 xsk->rx_npkts += rcvd;
866 ret = xq_enq(&xsk->tx, descs, rcvd);
868 xsk->outstanding_tx += rcvd;
872 int main(int argc, char **argv)
874 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
875 char xdp_filename[256];
879 parse_command_line(argc, argv);
881 if (setrlimit(RLIMIT_MEMLOCK, &r)) {
882 fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
887 snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
889 if (load_bpf_file(xdp_filename)) {
890 fprintf(stderr, "ERROR: load_bpf_file %s\n", bpf_log_buf);
895 fprintf(stderr, "ERROR: load_bpf_file: \"%s\"\n",
900 if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd[0], opt_xdp_flags) < 0) {
901 fprintf(stderr, "ERROR: link set xdp fd failed\n");
905 ret = bpf_map_update_elem(map_fd[0], &key, &opt_queue, 0);
907 fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
911 /* Create sockets... */
912 xsks[num_socks++] = xsk_configure(NULL);
915 for (i = 0; i < MAX_SOCKS - 1; i++)
916 xsks[num_socks++] = xsk_configure(xsks[0]->umem);
919 /* ...and insert them into the map. */
920 for (i = 0; i < num_socks; i++) {
922 ret = bpf_map_update_elem(map_fd[1], &key, &xsks[i]->sfd, 0);
924 fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
929 signal(SIGINT, int_exit);
930 signal(SIGTERM, int_exit);
931 signal(SIGABRT, int_exit);
933 setlocale(LC_ALL, "");
935 ret = pthread_create(&pt, NULL, poller, NULL);
938 prev_time = get_nsecs();
940 if (opt_bench == BENCH_RXDROP)
942 else if (opt_bench == BENCH_TXONLY)