// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/ethernet.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <locale.h>
#include <sys/types.h>
#include <poll.h>

#include "bpf/libbpf.h"
#include "bpf_util.h"
#include <bpf/bpf.h>

#include "xdpsock.h"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

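/*
 * Packet buffer geometry: the UMEM is NUM_FRAMES frames of FRAME_SIZE
 * bytes each (131072 * 2048 bytes = 256 MiB), with 2^FRAME_SHIFT ==
 * FRAME_SIZE so the txonly path can address frame i as i << FRAME_SHIFT.
 * All ring sizes must stay powers of two because the queues below index
 * their rings with "& mask".
 */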
#define NUM_FRAMES 131072
#define FRAME_HEADROOM 0
#define FRAME_SHIFT 11
#define FRAME_SIZE 2048
#define NUM_DESCS 1024
#define BATCH_SIZE 16

#define FQ_NUM_DESCS 1024
#define CQ_NUM_DESCS 1024

#define DEBUG_HEXDUMP 0

typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
        BENCH_RXDROP = 0,
        BENCH_TXONLY = 1,
        BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_shared_packet_buffer;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags;
static __u32 prog_id;

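/*
 * User-space descriptors of the mmapped rings. The UMEM fill and
 * completion queues carry bare u64 frame addresses, while the socket
 * rx/tx queues carry struct xdp_desc entries. producer/consumer point
 * into the shared ring area; cached_prod/cached_cons are local shadow
 * copies so the hot path rarely has to touch the shared counters.
 */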
struct xdp_umem_uqueue {
        u32 cached_prod;
        u32 cached_cons;
        u32 mask;
        u32 size;
        u32 *producer;
        u32 *consumer;
        u64 *ring;
        void *map;
};

struct xdp_umem {
        char *frames;
        struct xdp_umem_uqueue fq;
        struct xdp_umem_uqueue cq;
        int fd;
};

struct xdp_uqueue {
        u32 cached_prod;
        u32 cached_cons;
        u32 mask;
        u32 size;
        u32 *producer;
        u32 *consumer;
        struct xdp_desc *ring;
        void *map;
};

struct xdpsock {
        struct xdp_uqueue rx;
        struct xdp_uqueue tx;
        int sfd;
        struct xdp_umem *umem;
        u32 outstanding_tx;
        unsigned long rx_npkts;
        unsigned long tx_npkts;
        unsigned long prev_rx_npkts;
        unsigned long prev_tx_npkts;
};

static int num_socks;
struct xdpsock *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void dump_stats(void);

#define lassert(expr)                                                   \
        do {                                                            \
                if (!(expr)) {                                          \
                        fprintf(stderr, "%s:%s:%i: Assertion failed: "  \
                                #expr ": errno: %d/\"%s\"\n",           \
                                __FILE__, __func__, __LINE__,           \
                                errno, strerror(errno));                \
                        dump_stats();                                   \
                        exit(EXIT_FAILURE);                             \
                }                                                       \
        } while (0)

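/*
 * Lightweight memory barriers for the single-producer/single-consumer
 * rings: full DMB load/store variants on arm64, a plain compiler
 * barrier elsewhere (sufficient on strongly ordered x86).
 */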
#define barrier() __asm__ __volatile__("": : :"memory")
#ifdef __aarch64__
#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
#else
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#endif
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

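/* Canned 60-byte Ethernet/IPv4/UDP frame transmitted by the txonly benchmark. */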
static const char pkt_data[] =
        "\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
        "\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
        "\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
        "\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";

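/*
 * Ring bookkeeping helpers: the *_nb_free() functions report producer
 * headroom and only re-read the shared consumer index when the cached
 * value is exhausted; the *_nb_avail() functions do the same for the
 * consumer side against the shared producer index. Ring slots are
 * addressed as index & mask, e.g. index 1027 & 1023 lands in slot 3 of
 * a 1024-entry ring.
 */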
static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
{
        u32 free_entries = q->cached_cons - q->cached_prod;

        if (free_entries >= nb)
                return free_entries;

        /* Refresh the local tail pointer */
        q->cached_cons = *q->consumer + q->size;

        return q->cached_cons - q->cached_prod;
}

static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
{
        u32 free_entries = q->cached_cons - q->cached_prod;

        if (free_entries >= ndescs)
                return free_entries;

        /* Refresh the local tail pointer */
        q->cached_cons = *q->consumer + q->size;
        return q->cached_cons - q->cached_prod;
}

static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
{
        u32 entries = q->cached_prod - q->cached_cons;

        if (entries == 0) {
                q->cached_prod = *q->producer;
                entries = q->cached_prod - q->cached_cons;
        }

        return (entries > nb) ? nb : entries;
}

static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
{
        u32 entries = q->cached_prod - q->cached_cons;

        if (entries == 0) {
                q->cached_prod = *q->producer;
                entries = q->cached_prod - q->cached_cons;
        }

        return (entries > ndescs) ? ndescs : entries;
}

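/*
 * Fill/completion ring traffic: the umem_fill_to_kernel*() helpers hand
 * frame addresses back to the kernel for RX, while
 * umem_complete_from_kernel() reaps the addresses of frames whose TX
 * has completed.
 */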
static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
                                         struct xdp_desc *d,
                                         size_t nb)
{
        u32 i;

        if (umem_nb_free(fq, nb) < nb)
                return -ENOSPC;

        for (i = 0; i < nb; i++) {
                u32 idx = fq->cached_prod++ & fq->mask;

                fq->ring[idx] = d[i].addr;
        }

        u_smp_wmb();

        *fq->producer = fq->cached_prod;

        return 0;
}

static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d,
                                      size_t nb)
{
        u32 i;

        if (umem_nb_free(fq, nb) < nb)
                return -ENOSPC;

        for (i = 0; i < nb; i++) {
                u32 idx = fq->cached_prod++ & fq->mask;

                fq->ring[idx] = d[i];
        }

        u_smp_wmb();

        *fq->producer = fq->cached_prod;

        return 0;
}

static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
                                               u64 *d, size_t nb)
{
        u32 idx, i, entries = umem_nb_avail(cq, nb);

        u_smp_rmb();

        for (i = 0; i < entries; i++) {
                idx = cq->cached_cons++ & cq->mask;
                d[i] = cq->ring[idx];
        }

        if (entries > 0) {
                u_smp_wmb();

                *cq->consumer = cq->cached_cons;
        }

        return entries;
}

static inline void *xq_get_data(struct xdpsock *xsk, u64 addr)
{
        return &xsk->umem->frames[addr];
}

static inline int xq_enq(struct xdp_uqueue *uq,
                         const struct xdp_desc *descs,
                         unsigned int ndescs)
{
        struct xdp_desc *r = uq->ring;
        unsigned int i;

        if (xq_nb_free(uq, ndescs) < ndescs)
                return -ENOSPC;

        for (i = 0; i < ndescs; i++) {
                u32 idx = uq->cached_prod++ & uq->mask;

                r[idx].addr = descs[i].addr;
                r[idx].len = descs[i].len;
        }

        u_smp_wmb();

        *uq->producer = uq->cached_prod;
        return 0;
}

static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
                                 unsigned int id, unsigned int ndescs)
{
        struct xdp_desc *r = uq->ring;
        unsigned int i;

        if (xq_nb_free(uq, ndescs) < ndescs)
                return -ENOSPC;

        for (i = 0; i < ndescs; i++) {
                u32 idx = uq->cached_prod++ & uq->mask;

                r[idx].addr     = (id + i) << FRAME_SHIFT;
                r[idx].len      = sizeof(pkt_data) - 1;
        }

        u_smp_wmb();

        *uq->producer = uq->cached_prod;
        return 0;
}

static inline int xq_deq(struct xdp_uqueue *uq,
                         struct xdp_desc *descs,
                         int ndescs)
{
        struct xdp_desc *r = uq->ring;
        unsigned int idx;
        int i, entries;

        entries = xq_nb_avail(uq, ndescs);

        u_smp_rmb();

        for (i = 0; i < entries; i++) {
                idx = uq->cached_cons++ & uq->mask;
                descs[i] = r[idx];
        }

        if (entries > 0) {
                u_smp_wmb();

                *uq->consumer = uq->cached_cons;
        }

        return entries;
}

static void swap_mac_addresses(void *data)
{
        struct ether_header *eth = (struct ether_header *)data;
        struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
        struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
        struct ether_addr tmp;

        tmp = *src_addr;
        *src_addr = *dst_addr;
        *dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
        const unsigned char *address = (unsigned char *)pkt;
        const unsigned char *line = address;
        size_t line_size = 32;
        unsigned char c;
        char buf[32];
        int i = 0;

        if (!DEBUG_HEXDUMP)
                return;

        sprintf(buf, "addr=%llu", addr);
        printf("length = %zu\n", length);
        printf("%s | ", buf);
        while (length-- > 0) {
                printf("%02X ", *address++);
                if (!(++i % line_size) || (length == 0 && i % line_size)) {
                        if (length == 0) {
                                while (i++ % line_size)
                                        printf("__ ");
                        }
                        printf(" | ");  /* right close */
                        while (line < address) {
                                c = *line++;
                                printf("%c", (c < 33 || c == 255) ? 0x2E : c);
                        }
                        printf("\n");
                        if (length > 0)
                                printf("%s | ", buf);
                }
        }
        printf("\n");
}

static size_t gen_eth_frame(char *frame)
{
        memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
        return sizeof(pkt_data) - 1;
}

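/*
 * Register a page-aligned packet buffer as the socket's UMEM, size the
 * fill and completion rings, and mmap both rings at the offsets
 * reported by XDP_MMAP_OFFSETS. For the txonly benchmark every frame
 * is pre-populated with the canned packet.
 */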
static struct xdp_umem *xdp_umem_configure(int sfd)
{
        int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
        struct xdp_mmap_offsets off;
        struct xdp_umem_reg mr;
        struct xdp_umem *umem;
        socklen_t optlen;
        void *bufs;

        umem = calloc(1, sizeof(*umem));
        lassert(umem);

        lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
                               NUM_FRAMES * FRAME_SIZE) == 0);

        mr.addr = (__u64)bufs;
        mr.len = NUM_FRAMES * FRAME_SIZE;
        mr.chunk_size = FRAME_SIZE;
        mr.headroom = FRAME_HEADROOM;

        lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
        lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
                           sizeof(int)) == 0);
        lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
                           sizeof(int)) == 0);

        optlen = sizeof(off);
        lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
                           &optlen) == 0);

        umem->fq.map = mmap(0, off.fr.desc +
                            FQ_NUM_DESCS * sizeof(u64),
                            PROT_READ | PROT_WRITE,
                            MAP_SHARED | MAP_POPULATE, sfd,
                            XDP_UMEM_PGOFF_FILL_RING);
        lassert(umem->fq.map != MAP_FAILED);

        umem->fq.mask = FQ_NUM_DESCS - 1;
        umem->fq.size = FQ_NUM_DESCS;
        umem->fq.producer = umem->fq.map + off.fr.producer;
        umem->fq.consumer = umem->fq.map + off.fr.consumer;
        umem->fq.ring = umem->fq.map + off.fr.desc;
        umem->fq.cached_cons = FQ_NUM_DESCS;

        umem->cq.map = mmap(0, off.cr.desc +
                             CQ_NUM_DESCS * sizeof(u64),
                             PROT_READ | PROT_WRITE,
                             MAP_SHARED | MAP_POPULATE, sfd,
                             XDP_UMEM_PGOFF_COMPLETION_RING);
        lassert(umem->cq.map != MAP_FAILED);

        umem->cq.mask = CQ_NUM_DESCS - 1;
        umem->cq.size = CQ_NUM_DESCS;
        umem->cq.producer = umem->cq.map + off.cr.producer;
        umem->cq.consumer = umem->cq.map + off.cr.consumer;
        umem->cq.ring = umem->cq.map + off.cr.desc;

        umem->frames = bufs;
        umem->fd = sfd;

        if (opt_bench == BENCH_TXONLY) {
                int i;

                for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE)
                        (void)gen_eth_frame(&umem->frames[i]);
        }

        return umem;
}

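/*
 * Create one AF_XDP socket: set up (or share) the UMEM, size and mmap
 * the RX/TX descriptor rings, seed the fill ring with NUM_DESCS frames
 * in the non-shared case, and bind to the chosen interface/queue,
 * using XDP_SHARED_UMEM when reusing the first socket's UMEM.
 */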
static struct xdpsock *xsk_configure(struct xdp_umem *umem)
{
        struct sockaddr_xdp sxdp = {};
        struct xdp_mmap_offsets off;
        int sfd, ndescs = NUM_DESCS;
        struct xdpsock *xsk;
        bool shared = true;
        socklen_t optlen;
        u64 i;

        sfd = socket(PF_XDP, SOCK_RAW, 0);
        lassert(sfd >= 0);

        xsk = calloc(1, sizeof(*xsk));
        lassert(xsk);

        xsk->sfd = sfd;
        xsk->outstanding_tx = 0;

        if (!umem) {
                shared = false;
                xsk->umem = xdp_umem_configure(sfd);
        } else {
                xsk->umem = umem;
        }

        lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
                           &ndescs, sizeof(int)) == 0);
        lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
                           &ndescs, sizeof(int)) == 0);
        optlen = sizeof(off);
        lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
                           &optlen) == 0);

        /* Rx */
        xsk->rx.map = mmap(NULL,
                           off.rx.desc +
                           NUM_DESCS * sizeof(struct xdp_desc),
                           PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_POPULATE, sfd,
                           XDP_PGOFF_RX_RING);
        lassert(xsk->rx.map != MAP_FAILED);

        if (!shared) {
                for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE)
                        lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
                                == 0);
        }

        /* Tx */
        xsk->tx.map = mmap(NULL,
                           off.tx.desc +
                           NUM_DESCS * sizeof(struct xdp_desc),
                           PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_POPULATE, sfd,
                           XDP_PGOFF_TX_RING);
        lassert(xsk->tx.map != MAP_FAILED);

        xsk->rx.mask = NUM_DESCS - 1;
        xsk->rx.size = NUM_DESCS;
        xsk->rx.producer = xsk->rx.map + off.rx.producer;
        xsk->rx.consumer = xsk->rx.map + off.rx.consumer;
        xsk->rx.ring = xsk->rx.map + off.rx.desc;

        xsk->tx.mask = NUM_DESCS - 1;
        xsk->tx.size = NUM_DESCS;
        xsk->tx.producer = xsk->tx.map + off.tx.producer;
        xsk->tx.consumer = xsk->tx.map + off.tx.consumer;
        xsk->tx.ring = xsk->tx.map + off.tx.desc;
        xsk->tx.cached_cons = NUM_DESCS;

        sxdp.sxdp_family = PF_XDP;
        sxdp.sxdp_ifindex = opt_ifindex;
        sxdp.sxdp_queue_id = opt_queue;

        if (shared) {
                sxdp.sxdp_flags = XDP_SHARED_UMEM;
                sxdp.sxdp_shared_umem_fd = umem->fd;
        } else {
                sxdp.sxdp_flags = opt_xdp_bind_flags;
        }

        lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);

        return xsk;
}

static void print_benchmark(bool running)
{
        const char *bench_str = "INVALID";

        if (opt_bench == BENCH_RXDROP)
                bench_str = "rxdrop";
        else if (opt_bench == BENCH_TXONLY)
                bench_str = "txonly";
        else if (opt_bench == BENCH_L2FWD)
                bench_str = "l2fwd";

        printf("%s:%d %s ", opt_if, opt_queue, bench_str);
        if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
                printf("xdp-skb ");
        else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
                printf("xdp-drv ");
        else
                printf("        ");

        if (opt_poll)
                printf("poll() ");

        if (running) {
                printf("running...");
                fflush(stdout);
        }
}

static void dump_stats(void)
{
        unsigned long now = get_nsecs();
        long dt = now - prev_time;
        int i;

        prev_time = now;

        for (i = 0; i < num_socks && xsks[i]; i++) {
                char *fmt = "%-15s %'-11.0f %'-11lu\n";
                double rx_pps, tx_pps;

                rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
                         1000000000. / dt;
                tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
                         1000000000. / dt;

                printf("\n sock%d@", i);
                print_benchmark(false);
                printf("\n");

                printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
                       dt / 1000000000.);
                printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
                printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

                xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
                xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
        }
}

static void *poller(void *arg)
{
        (void)arg;
        for (;;) {
                sleep(opt_interval);
                dump_stats();
        }

        return NULL;
}

static void int_exit(int sig)
{
        __u32 curr_prog_id = 0;

        (void)sig;
        dump_stats();
        if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
                printf("bpf_get_link_xdp_id failed\n");
                exit(EXIT_FAILURE);
        }
        if (prog_id == curr_prog_id)
                bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
        else if (!curr_prog_id)
                printf("couldn't find a prog id on the given interface\n");
        else
                printf("program on interface changed, not removing\n");
        exit(EXIT_SUCCESS);
}

static struct option long_options[] = {
        {"rxdrop", no_argument, 0, 'r'},
        {"txonly", no_argument, 0, 't'},
        {"l2fwd", no_argument, 0, 'l'},
        {"interface", required_argument, 0, 'i'},
        {"queue", required_argument, 0, 'q'},
        {"poll", no_argument, 0, 'p'},
        {"shared-buffer", no_argument, 0, 's'},
        {"xdp-skb", no_argument, 0, 'S'},
        {"xdp-native", no_argument, 0, 'N'},
        {"interval", required_argument, 0, 'n'},
        {"zero-copy", no_argument, 0, 'z'},
        {"copy", no_argument, 0, 'c'},
        {"force", no_argument, 0, 'F'},
        {0, 0, 0, 0}
};

static void usage(const char *prog)
{
        const char *str =
                "  Usage: %s [OPTIONS]\n"
                "  Options:\n"
                "  -r, --rxdrop         Discard all incoming packets (default)\n"
                "  -t, --txonly         Only send packets\n"
                "  -l, --l2fwd          MAC swap L2 forwarding\n"
                "  -i, --interface=n    Run on interface n\n"
                "  -q, --queue=n        Use queue n (default 0)\n"
                "  -p, --poll           Use poll syscall\n"
                "  -s, --shared-buffer  Use shared packet buffer\n"
                "  -S, --xdp-skb        Use XDP skb mode\n"
                "  -N, --xdp-native     Enforce XDP native mode\n"
                "  -n, --interval=n     Specify statistics update interval (default 1 sec).\n"
                "  -z, --zero-copy      Force zero-copy mode.\n"
                "  -c, --copy           Force copy mode.\n"
                "  -F, --force          Force loading the XDP program, replacing one that is already attached.\n"
                "\n";
        fprintf(stderr, str, prog);
        exit(EXIT_FAILURE);
}

static void parse_command_line(int argc, char **argv)
{
        int option_index, c;

        opterr = 0;

        for (;;) {
                c = getopt_long(argc, argv, "Frtli:q:psSNn:cz", long_options,
                                &option_index);
                if (c == -1)
                        break;

                switch (c) {
                case 'r':
                        opt_bench = BENCH_RXDROP;
                        break;
                case 't':
                        opt_bench = BENCH_TXONLY;
                        break;
                case 'l':
                        opt_bench = BENCH_L2FWD;
                        break;
                case 'i':
                        opt_if = optarg;
                        break;
                case 'q':
                        opt_queue = atoi(optarg);
                        break;
                case 's':
                        opt_shared_packet_buffer = 1;
                        break;
                case 'p':
                        opt_poll = 1;
                        break;
                case 'S':
                        opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
                        opt_xdp_bind_flags |= XDP_COPY;
                        break;
                case 'N':
                        opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
                        break;
                case 'n':
                        opt_interval = atoi(optarg);
                        break;
                case 'z':
                        opt_xdp_bind_flags |= XDP_ZEROCOPY;
                        break;
                case 'c':
                        opt_xdp_bind_flags |= XDP_COPY;
                        break;
                case 'F':
                        opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
                        break;
                default:
                        usage(basename(argv[0]));
                }
        }

        opt_ifindex = if_nametoindex(opt_if);
        if (!opt_ifindex) {
                fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
                        opt_if);
                usage(basename(argv[0]));
        }
}

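/*
 * TX doorbell: a zero-length sendto() tells the kernel to start
 * transmitting queued descriptors. Transient ENOBUFS/EAGAIN/EBUSY are
 * tolerated and simply retried on the next kick.
 */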
static void kick_tx(int fd)
{
        int ret;

        ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
        if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
                return;
        lassert(0);
}

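/*
 * Reap TX completions: the l2fwd variant recycles completed frames back
 * onto the fill ring so they can be received again, while the txonly
 * variant only updates the outstanding/packet counters.
 */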
static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
        u64 descs[BATCH_SIZE];
        unsigned int rcvd;
        size_t ndescs;

        if (!xsk->outstanding_tx)
                return;

        kick_tx(xsk->sfd);
        ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
                 xsk->outstanding_tx;

        /* re-add completed Tx buffers */
        rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
        if (rcvd > 0) {
                umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
                xsk->outstanding_tx -= rcvd;
                xsk->tx_npkts += rcvd;
        }
}

static inline void complete_tx_only(struct xdpsock *xsk)
{
        u64 descs[BATCH_SIZE];
        unsigned int rcvd;

        if (!xsk->outstanding_tx)
                return;

        kick_tx(xsk->sfd);

        rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
        if (rcvd > 0) {
                xsk->outstanding_tx -= rcvd;
                xsk->tx_npkts += rcvd;
        }
}

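/*
 * Benchmark loops. rxdrop receives a batch and immediately returns the
 * frames to the fill ring; txonly keeps the TX ring topped up with the
 * canned frame; l2fwd swaps the MAC addresses of received packets and
 * retransmits them on the same socket.
 */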
static void rx_drop(struct xdpsock *xsk)
{
        struct xdp_desc descs[BATCH_SIZE];
        unsigned int rcvd, i;

        rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
        if (!rcvd)
                return;

        for (i = 0; i < rcvd; i++) {
                char *pkt = xq_get_data(xsk, descs[i].addr);

                hex_dump(pkt, descs[i].len, descs[i].addr);
        }

        xsk->rx_npkts += rcvd;

        umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
}

static void rx_drop_all(void)
{
        struct pollfd fds[MAX_SOCKS + 1];
        int i, ret, timeout, nfds = 1;

        memset(fds, 0, sizeof(fds));

        for (i = 0; i < num_socks; i++) {
                fds[i].fd = xsks[i]->sfd;
                fds[i].events = POLLIN;
                timeout = 1000; /* 1 second */
        }

        for (;;) {
                if (opt_poll) {
                        ret = poll(fds, nfds, timeout);
                        if (ret <= 0)
                                continue;
                }

                for (i = 0; i < num_socks; i++)
                        rx_drop(xsks[i]);
        }
}

static void tx_only(struct xdpsock *xsk)
{
        int timeout, ret, nfds = 1;
        struct pollfd fds[nfds + 1];
        unsigned int idx = 0;

        memset(fds, 0, sizeof(fds));
        fds[0].fd = xsk->sfd;
        fds[0].events = POLLOUT;
        timeout = 1000; /* 1 second */

        for (;;) {
                if (opt_poll) {
                        ret = poll(fds, nfds, timeout);
                        if (ret <= 0)
                                continue;

                        if (fds[0].fd != xsk->sfd ||
                            !(fds[0].revents & POLLOUT))
                                continue;
                }

                if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
                        lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);

                        xsk->outstanding_tx += BATCH_SIZE;
                        idx += BATCH_SIZE;
                        idx %= NUM_FRAMES;
                }

                complete_tx_only(xsk);
        }
}

static void l2fwd(struct xdpsock *xsk)
{
        for (;;) {
                struct xdp_desc descs[BATCH_SIZE];
                unsigned int rcvd, i;
                int ret;

                for (;;) {
                        complete_tx_l2fwd(xsk);

                        rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
                        if (rcvd > 0)
                                break;
                }

                for (i = 0; i < rcvd; i++) {
                        char *pkt = xq_get_data(xsk, descs[i].addr);

                        swap_mac_addresses(pkt);

                        hex_dump(pkt, descs[i].len, descs[i].addr);
                }

                xsk->rx_npkts += rcvd;

                ret = xq_enq(&xsk->tx, descs, rcvd);
                lassert(ret == 0);
                xsk->outstanding_tx += rcvd;
        }
}

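/*
 * Setup flow: raise RLIMIT_MEMLOCK, load the companion argv[0]_kern.o
 * object and attach its XDP program, remember the attached program id
 * so int_exit() only detaches our own program, write the queue number
 * into qidconf_map, create the AF_XDP socket(s) and register their fds
 * in xsks_map, then start the statistics thread and run the selected
 * benchmark. Illustrative invocations (interface name is an example):
 *
 *   ./xdpsock -i eth0 -q 0 -r -N    rxdrop in native XDP mode, queue 0
 *   ./xdpsock -i eth0 -t -S         txonly in skb (generic) mode
 */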
int main(int argc, char **argv)
{
        struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
        struct bpf_prog_load_attr prog_load_attr = {
                .prog_type      = BPF_PROG_TYPE_XDP,
        };
        int prog_fd, qidconf_map, xsks_map;
        struct bpf_prog_info info = {};
        __u32 info_len = sizeof(info);
        struct bpf_object *obj;
        char xdp_filename[256];
        struct bpf_map *map;
        int i, ret, key = 0;
        pthread_t pt;

        parse_command_line(argc, argv);

        if (setrlimit(RLIMIT_MEMLOCK, &r)) {
                fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
                        strerror(errno));
                exit(EXIT_FAILURE);
        }

        snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
        prog_load_attr.file = xdp_filename;

        if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
                exit(EXIT_FAILURE);
        if (prog_fd < 0) {
                fprintf(stderr, "ERROR: no program found: %s\n",
                        strerror(prog_fd));
                exit(EXIT_FAILURE);
        }

        /* bpf_map__fd() returns a negative errno value on failure */
        map = bpf_object__find_map_by_name(obj, "qidconf_map");
        qidconf_map = bpf_map__fd(map);
        if (qidconf_map < 0) {
                fprintf(stderr, "ERROR: no qidconf map found: %s\n",
                        strerror(-qidconf_map));
                exit(EXIT_FAILURE);
        }

        map = bpf_object__find_map_by_name(obj, "xsks_map");
        xsks_map = bpf_map__fd(map);
        if (xsks_map < 0) {
                fprintf(stderr, "ERROR: no xsks map found: %s\n",
                        strerror(-xsks_map));
                exit(EXIT_FAILURE);
        }

        if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
                fprintf(stderr, "ERROR: link set xdp fd failed\n");
                exit(EXIT_FAILURE);
        }

        ret = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
        if (ret) {
                printf("can't get prog info - %s\n", strerror(errno));
                return 1;
        }
        prog_id = info.id;

        ret = bpf_map_update_elem(qidconf_map, &key, &opt_queue, 0);
        if (ret) {
                fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
                exit(EXIT_FAILURE);
        }

        /* Create sockets... */
        xsks[num_socks++] = xsk_configure(NULL);

#if RR_LB
        for (i = 0; i < MAX_SOCKS - 1; i++)
                xsks[num_socks++] = xsk_configure(xsks[0]->umem);
#endif

        /* ...and insert them into the map. */
        for (i = 0; i < num_socks; i++) {
                key = i;
                ret = bpf_map_update_elem(xsks_map, &key, &xsks[i]->sfd, 0);
                if (ret) {
                        fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
                        exit(EXIT_FAILURE);
                }
        }

        signal(SIGINT, int_exit);
        signal(SIGTERM, int_exit);
        signal(SIGABRT, int_exit);

        setlocale(LC_ALL, "");

        ret = pthread_create(&pt, NULL, poller, NULL);
        lassert(ret == 0);

        prev_time = get_nsecs();

        if (opt_bench == BENCH_RXDROP)
                rx_drop_all();
        else if (opt_bench == BENCH_TXONLY)
                tx_only(xsks[0]);
        else
                l2fwd(xsks[0]);

        return 0;
}