// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
 *                             Uppsala University and
 *                             Swedish University of Agricultural Sciences
 *
 * Alexey Kuznetsov  <kuznet@ms2.inr.ac.ru>
 * Ben Greear <greearb@candelatech.com>
 * Jens Låås <jens.laas@data.slu.se>
 *
 * A tool for loading the network with preconfigured packets.
 * The tool is implemented as a Linux module. Parameters are output
 * device, delay (to hard_xmit), number of packets, and whether
 * to use multiple SKBs or just the same one.
 * pktgen uses the installed interface's output routine.
 *
 * Additional hacking by:
 *
 * Jens.Laas@data.slu.se
 * Improved by ANK. 010120.
 * Improved by ANK even more. 010212.
 * MAC address typo fixed. 010417 --ro
 * Integrated. 020301 --DaveM
 * Added multiskb option 020301 --DaveM
 * Scaling of results. 020417--sigurdur@linpro.no
 * Significant re-work of the module:
 *   * Convert to threaded model to more efficiently be able to transmit
 *     and receive on multiple interfaces at once.
 *   * Converted many counters to __u64 to allow longer runs.
 *   * Allow configuration of ranges, like min/max IP address, MACs,
 *     and UDP-ports, for both source and destination, and can
 *     set to use a random distribution or sequentially walk the range.
 *   * Can now change most values after starting.
 *   * Place 12-byte packet in UDP payload with magic number,
 *     sequence number, and timestamp.
 *   * Add receiver code that detects dropped pkts, re-ordered pkts, and
 *     latencies (with micro-second precision).
 *   * Add IOCTL interface to easily get counters & configuration.
 *   --Ben Greear <greearb@candelatech.com>
 *
 * Renamed multiskb to clone_skb and cleaned up sending core for two distinct
 * skb modes. A clone_skb=0 mode for Ben "ranges" work and a clone_skb != 0
 * as a "fastpath" with a configurable number of clones after alloc's.
 * clone_skb=0 means all packets are allocated; this also means ranges,
 * time stamps, etc. can be used. clone_skb=100 means 1 malloc is followed
 * by 100 clones.
 *
 * Also moved to /proc/net/pktgen/
 *
 * Sept 10: Fixed threading/locking.  Lots of bone-headed and more clever
 *          mistakes.  Also merged in DaveM's patch in the -pre6 patch.
 * --Ben Greear <greearb@candelatech.com>
 *
 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
 *
 * 021124 Finished major redesign and rewrite for new functionality.
 * See Documentation/networking/pktgen.txt for how to use this.
 *
 * The new operation:
 * For each CPU one thread/process is created at start. This process checks
 * for running devices in the if_list and sends packets until count is 0.
 * The thread also checks thread->control, which is used for inter-process
 * communication: the controlling process "posts" operations to the threads
 * this way.
 *
 * The if_list is RCU protected, and the if_lock remains to protect updating
 * of if_list from "add_device", as it is invoked from userspace (via proc
 * write).
 *
 * By design there should only be *one* "controlling" process. In practice
 * multiple write accesses give unpredictable results. Note that a "write"
 * to /proc returns a result code that should be read back by the "writer".
 * For practical use this should be no problem.
 *
 * Note: when adding devices to a specific CPU, it is a good idea to also
 * assign /proc/irq/XX/smp_affinity so TX interrupts get bound to the same CPU.
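 *
 * Illustrative example (editorial; not part of the original changelog):
 * a typical single-device session driven through the /proc interface
 * described above. The device and thread names ("eth0", "kpktgend_0")
 * are assumptions.
 *
 *   echo "add_device eth0" > /proc/net/pktgen/kpktgend_0
 *   echo "count 100000"    > /proc/net/pktgen/eth0
 *   echo "pkt_size 60"     > /proc/net/pktgen/eth0
 *   echo "dst 10.0.0.2"    > /proc/net/pktgen/eth0
 *   echo "start"           > /proc/net/pktgen/pgctrl
 *   cat /proc/net/pktgen/eth0        (read back the results)
 *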
 * Fix refcount off by one if first packet fails, potential null deref,
 *
 * First "ranges" functionality for ipv6 030726 --ro
 *
 * Included flow support. 030802 ANK.
 *
 * Fixed unaligned access on IA-64. Grant Grundler <grundler@parisc-linux.org>
 *
 * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419
 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604
 *
 * New xmit() return, do_div and misc clean up by Stephen Hemminger
 * <shemminger@osdl.org> 040923
 *
 * Randy Dunlap fixed u64 printk compiler warning
 *
 * Remove FCS from BW calculation.  Lennert Buytenhek <buytenh@wantstofly.org>
 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
 *
 * Corrections from Nikolai Malykh (nmalykh@bilim.com)
 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230
 *
 * interruptible_sleep_on_timeout() replaced, Nishanth Aravamudan <nacc@us.ibm.com>
 *
 * MPLS support by Steven Whitehouse <steve@chygwyn.com>
 *
 * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
 *
 * Fixed src_mac command to set source mac of packet to value specified in
 * command by Adit Ranadive <adit.262@gmail.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sys.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/wait.h>
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/mmzone.h>
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/netns/generic.h>
#include <asm/byteorder.h>
#include <linux/rcupdate.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <asm/div64.h>		/* do_div */
#define VERSION	"2.75"
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
#define MPLS_STACK_BOTTOM htonl(0x00000100)

#define func_enter() pr_debug("entering %s\n", __func__);
#define PKT_FLAGS							\
	pf(IPV6)		/* Interface in IPV6 Mode */		\
	pf(IPSRC_RND)		/* IP-Src Random */			\
	pf(IPDST_RND)		/* IP-Dst Random */			\
	pf(TXSIZE_RND)		/* Transmit size is random */		\
	pf(UDPSRC_RND)		/* UDP-Src Random */			\
	pf(UDPDST_RND)		/* UDP-Dst Random */			\
	pf(UDPCSUM)		/* Include UDP checksum */		\
	pf(NO_TIMESTAMP)	/* Don't timestamp packets (default TS) */ \
	pf(MPLS_RND)		/* Random MPLS labels */		\
	pf(QUEUE_MAP_RND)	/* queue map Random */			\
	pf(QUEUE_MAP_CPU)	/* queue map mirrors smp_processor_id() */ \
	pf(FLOW_SEQ)		/* Sequential flows */			\
	pf(IPSEC)		/* ipsec on for flows */		\
	pf(MACSRC_RND)		/* MAC-Src Random */			\
	pf(MACDST_RND)		/* MAC-Dst Random */			\
	pf(VID_RND)		/* Random VLAN ID */			\
	pf(SVID_RND)		/* Random SVLAN ID */			\
	pf(NODE)		/* Node memory alloc */			\

#define pf(flag)		flag##_SHIFT,
enum pkt_flags {
	PKT_FLAGS
};
#undef pf

/* Device flag bits */
#define pf(flag)		static const __u32 F_##flag = (1<<flag##_SHIFT);
PKT_FLAGS
#undef pf

#define pf(flag)		__stringify(flag),
static char *pkt_flag_names[] = {
	PKT_FLAGS
};
#undef pf

#define NR_PKT_FLAGS		ARRAY_SIZE(pkt_flag_names)
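/*
 * A sketch of how the pf() x-macro above expands (editorial, illustrative
 * only): for pf(IPV6) the three expansions yield, in order,
 *   IPV6_SHIFT                                   (enum pkt_flags member)
 *   static const __u32 F_IPV6 = (1 << IPV6_SHIFT);
 *   "IPV6"                                       (entry in pkt_flag_names[])
 * so a flag can be tested as (pkt_dev->flags & F_IPV6) and printed by
 * indexing pkt_flag_names[] with its shift.
 */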
/* Thread control flag bits */
#define T_STOP        (1<<0)	/* Stop run */
#define T_RUN         (1<<1)	/* Start run */
#define T_REMDEVALL   (1<<2)	/* Remove all devs */
#define T_REMDEV      (1<<3)	/* Remove one dev */

#define M_START_XMIT		0	/* Default normal TX */
#define M_NETIF_RECEIVE		1	/* Inject packets into stack */
#define M_QUEUE_XMIT		2	/* Inject packet into qdisc */

/* If lock -- protects updating of if_list */
#define   if_lock(t)           mutex_lock(&(t->if_lock));
#define   if_unlock(t)           mutex_unlock(&(t->if_lock));
/* Magic value used to identify pktgen packets on receive */
#define PKTGEN_MAGIC 0xbe9be955
#define PG_PROC_DIR "pktgen"
#define PGCTRL	    "pgctrl"

#define MAX_CFLOWS  65536

#define VLAN_TAG_SIZE(x) ((x)->vlan_id == 0xffff ? 0 : 4)
#define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4)
struct flow_state {
	__be32 cur_daddr;
	int count;
#ifdef CONFIG_XFRM
	struct xfrm_state *x;
#endif
	__u32 flags;
};

/* flow flag bits */
#define F_INIT   (1<<0)		/* flow has been initialized */
struct pktgen_dev {
	/*
	 * Try to keep frequently/infrequently used vars. separated.
	 */
	struct proc_dir_entry *entry;	/* proc file */
	struct pktgen_thread *pg_thread;/* the owner */
	struct list_head list;		/* chaining in the thread's run-queue */
	struct rcu_head	 rcu;		/* freed by RCU */

	int running;		/* if false, the test will stop */

	/* If min != max, then we will either do a linear iteration, or
	 * we will do a random selection from within the range.
	 */
	int min_pkt_size;
	int max_pkt_size;
	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
	int nfrags;
	int removal_mark;	/* non-zero => the device is marked for
				 * removal by worker thread */
	struct page *page;
	u64 delay;		/* nano-seconds */

	__u64 count;		/* Default No packets to send */
	__u64 sofar;		/* How many pkts we've sent so far */
	__u64 tx_bytes;		/* How many bytes we've transmitted */
	__u64 errors;		/* Errors when trying to transmit */

	/* runtime counters relating to clone_skb */

	int last_ok;		/* Was last skb sent?
				 * Or a failed transmit of some sort?
				 * This will keep sequence numbers in order.
				 */
	ktime_t next_tx;
	ktime_t started_at;
	ktime_t stopped_at;
	u64	idle_acc;	/* nano-seconds */

	__u32 seq_num;

	int clone_skb;		/*
				 * Use multiple SKBs during packet gen.
				 * If this number is greater than 1, then
				 * that many copies of the same packet will be
				 * sent before a new packet is allocated.
				 * If you want to send 1024 identical packets
				 * before creating a new packet,
				 * set clone_skb to 1024.
				 */

	char dst_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char dst_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char src_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char src_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */

	struct in6_addr in6_saddr;
	struct in6_addr in6_daddr;
	struct in6_addr cur_in6_daddr;
	struct in6_addr cur_in6_saddr;
	/* For ranges */
	struct in6_addr min_in6_daddr;
	struct in6_addr max_in6_daddr;
	struct in6_addr min_in6_saddr;
	struct in6_addr max_in6_saddr;
	u64 min_in6_h, min_in6_l;	/* halves used by set_src_in6_addr() */
	u64 max_in6_h, max_in6_l;
	/* If we're doing ranges, random or incremental, then this
	 * defines the min/max for those ranges.
	 */
	__be32 saddr_min;	/* inclusive, source IP address */
	__be32 saddr_max;	/* exclusive, source IP address */
	__be32 daddr_min;	/* inclusive, dest IP address */
	__be32 daddr_max;	/* exclusive, dest IP address */

	__u16 udp_src_min;	/* inclusive, source UDP port */
	__u16 udp_src_max;	/* exclusive, source UDP port */
	__u16 udp_dst_min;	/* inclusive, dest UDP port */
	__u16 udp_dst_max;	/* exclusive, dest UDP port */

	/* DSCP + ECN */
	__u8 tos;		/* six MSB of (former) IPv4 TOS
				 * are for dscp codepoint */
	__u8 traffic_class;	/* ditto for the (former) Traffic Class in IPv6
				 * (see RFC 3260, sec. 4) */

	/* MPLS */
	unsigned int nr_labels;	/* Depth of stack, 0 = no MPLS */
	__be32 labels[MAX_MPLS_LABELS];

	/* VLAN/SVLAN (802.1Q/Q-in-Q) */
	__u8  vlan_p;
	__u8  vlan_cfi;
	__u16 vlan_id;		/* 0xffff means no vlan tag */

	__u8  svlan_p;
	__u8  svlan_cfi;
	__u16 svlan_id;		/* 0xffff means no svlan tag */

	__u32 src_mac_count;	/* How many MACs to iterate through */
	__u32 dst_mac_count;	/* How many MACs to iterate through */

	unsigned char dst_mac[ETH_ALEN];
	unsigned char src_mac[ETH_ALEN];

	__u32 cur_dst_mac_offset;
	__u32 cur_src_mac_offset;
	__be32 cur_saddr;
	__be32 cur_daddr;
	__u16 cur_udp_dst;
	__u16 cur_udp_src;
	__u16 cur_queue_map;
	__u32 cur_pkt_size;

	__u8 hh[14];
	/* = {
	   0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB,

	   We fill in SRC address later
	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	   0x08, 0x00
	   };
	 */
	__u16 pad;		/* pad out the hh struct to an even 16 bytes */

	struct sk_buff *skb;	/* skb we are to transmit next, used for when we
				 * are transmitting the same one multiple times
				 */
	struct net_device *odev; /* The out-going device.
				  * Note that the device should have its
				  * pg_info pointer pointing back to this
				  * device.
				  * Set when the user specifies the out-going
				  * device name (not when the inject is
				  * started as it used to do.)
				  */
	char odevname[32];
	struct flow_state *flows;
	unsigned int cflows;	/* Concurrent flows (config) */
	unsigned int lflow;	/* Flow length (config) */
	unsigned int nflows;	/* accumulated flows (stats) */
	unsigned int curfl;	/* current sequenced flow (state) */

	__u16 queue_map_min;
	__u16 queue_map_max;
	__u32 skb_priority;	/* skb priority field */
	unsigned int burst;	/* number of duplicated packets to burst */
	int node;		/* Memory node */
	int xmit_mode;

#ifdef CONFIG_XFRM
	__u8	ipsmode;	/* IPSEC mode (config) */
	__u8	ipsproto;	/* IPSEC type (config) */
	__u32	spi;
	struct xfrm_dst xdst;
	struct dst_ops dstops;
#endif
	char result[512];
};
static unsigned int pg_net_id __read_mostly;

struct pktgen_net {
	struct net		*net;
	struct proc_dir_entry	*proc_dir;
	struct list_head	pktgen_threads;
	bool			pktgen_exiting;
};
struct pktgen_thread {
	struct mutex if_lock;		/* for list of devices */
	struct list_head if_list;	/* All device here */
	struct list_head th_list;
	struct task_struct *tsk;
	char result[512];

	/* Field for thread to receive "posted" events terminate,
	 * stop ifs etc.
	 */

	u32 control;
	int cpu;

	wait_queue_head_t queue;
	struct completion start_done;
	struct pktgen_net *net;
};

#define REMOVE 1
#define FIND   0

static const char version[] =
	"Packet Generator for packet performance testing. "
	"Version: " VERSION "\n";
static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
					  const char *ifname, bool exact);
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
static void pktgen_run_all_threads(struct pktgen_net *pn);
static void pktgen_reset_all_threads(struct pktgen_net *pn);
static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn);

static void pktgen_stop(struct pktgen_thread *t);
static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
/* Module parameters, defaults. */
static int pg_count_d __read_mostly = 1000;
static int pg_delay_d __read_mostly;
static int pg_clone_skb_d __read_mostly;
static int debug __read_mostly;

static DEFINE_MUTEX(pktgen_thread_lock);

static struct notifier_block pktgen_notifier_block = {
	.notifier_call = pktgen_device_event,
};

/*
 * /proc handling functions
 *
 */
static int pgctrl_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, version);
	return 0;
}
static ssize_t pgctrl_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	char data[128];
	struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (count == 0)
		return -EINVAL;

	if (count > sizeof(data))
		count = sizeof(data);

	if (copy_from_user(data, buf, count))
		return -EFAULT;

	data[count - 1] = 0;	/* Strip trailing '\n' and terminate string */

	if (!strcmp(data, "stop"))
		pktgen_stop_all_threads_ifs(pn);

	else if (!strcmp(data, "start"))
		pktgen_run_all_threads(pn);

	else if (!strcmp(data, "reset"))
		pktgen_reset_all_threads(pn);

	else
		return -EINVAL;

	return count;
}
static int pgctrl_open(struct inode *inode, struct file *file)
{
	return single_open(file, pgctrl_show, PDE_DATA(inode));
}

static const struct file_operations pktgen_fops = {
	.open    = pgctrl_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = pgctrl_write,
	.release = single_release,
};
static int pktgen_if_show(struct seq_file *seq, void *v)
{
	const struct pktgen_dev *pkt_dev = seq->private;
	ktime_t stopped;
	unsigned int i;
	u64 idle;

	seq_printf(seq,
		   "Params: count %llu  min_pkt_size: %u  max_pkt_size: %u\n",
		   (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size,
		   pkt_dev->max_pkt_size);

	seq_printf(seq,
		   "     frags: %d  delay: %llu  clone_skb: %d  ifname: %s\n",
		   pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
		   pkt_dev->clone_skb, pkt_dev->odevname);

	seq_printf(seq, "     flows: %u flowlen: %u\n", pkt_dev->cflows,
		   pkt_dev->lflow);

	seq_printf(seq,
		   "     queue_map_min: %u  queue_map_max: %u\n",
		   pkt_dev->queue_map_min,
		   pkt_dev->queue_map_max);

	if (pkt_dev->skb_priority)
		seq_printf(seq, "     skb_priority: %u\n",
			   pkt_dev->skb_priority);

	if (pkt_dev->flags & F_IPV6) {
		seq_printf(seq,
			   "     saddr: %pI6c  min_saddr: %pI6c  max_saddr: %pI6c\n"
			   "     daddr: %pI6c  min_daddr: %pI6c  max_daddr: %pI6c\n",
			   &pkt_dev->in6_saddr,
			   &pkt_dev->min_in6_saddr, &pkt_dev->max_in6_saddr,
			   &pkt_dev->in6_daddr,
			   &pkt_dev->min_in6_daddr, &pkt_dev->max_in6_daddr);
	} else {
		seq_printf(seq,
			   "     dst_min: %s  dst_max: %s\n",
			   pkt_dev->dst_min, pkt_dev->dst_max);
		seq_printf(seq,
			   "     src_min: %s  src_max: %s\n",
			   pkt_dev->src_min, pkt_dev->src_max);
	}

	seq_puts(seq, "     src_mac: ");

	seq_printf(seq, "%pM ",
		   is_zero_ether_addr(pkt_dev->src_mac) ?
			     pkt_dev->odev->dev_addr : pkt_dev->src_mac);

	seq_puts(seq, "dst_mac: ");
	seq_printf(seq, "%pM\n", pkt_dev->dst_mac);

	seq_printf(seq,
		   "     udp_src_min: %d  udp_src_max: %d"
		   "  udp_dst_min: %d  udp_dst_max: %d\n",
		   pkt_dev->udp_src_min, pkt_dev->udp_src_max,
		   pkt_dev->udp_dst_min, pkt_dev->udp_dst_max);

	seq_printf(seq,
		   "     src_mac_count: %d  dst_mac_count: %d\n",
		   pkt_dev->src_mac_count, pkt_dev->dst_mac_count);

	if (pkt_dev->nr_labels) {
		seq_puts(seq, "     mpls: ");
		for (i = 0; i < pkt_dev->nr_labels; i++)
			seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
				   i == pkt_dev->nr_labels-1 ? "\n" : ", ");
	}

	if (pkt_dev->vlan_id != 0xffff)
		seq_printf(seq, "     vlan_id: %u  vlan_p: %u  vlan_cfi: %u\n",
			   pkt_dev->vlan_id, pkt_dev->vlan_p,
			   pkt_dev->vlan_cfi);

	if (pkt_dev->svlan_id != 0xffff)
		seq_printf(seq, "     svlan_id: %u  vlan_p: %u  vlan_cfi: %u\n",
			   pkt_dev->svlan_id, pkt_dev->svlan_p,
			   pkt_dev->svlan_cfi);

	if (pkt_dev->tos)
		seq_printf(seq, "     tos: 0x%02x\n", pkt_dev->tos);

	if (pkt_dev->traffic_class)
		seq_printf(seq, "     traffic_class: 0x%02x\n", pkt_dev->traffic_class);

	if (pkt_dev->burst > 1)
		seq_printf(seq, "     burst: %d\n", pkt_dev->burst);

	if (pkt_dev->node >= 0)
		seq_printf(seq, "     node: %d\n", pkt_dev->node);

	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
		seq_puts(seq, "     xmit_mode: netif_receive\n");
	else if (pkt_dev->xmit_mode == M_QUEUE_XMIT)
		seq_puts(seq, "     xmit_mode: xmit_queue\n");

	seq_puts(seq, "     Flags: ");

	for (i = 0; i < NR_PKT_FLAGS; i++) {
		if (i == FLOW_SEQ_SHIFT)
			if (!pkt_dev->cflows)
				continue;

		if (pkt_dev->flags & (1 << i))
			seq_printf(seq, "%s ", pkt_flag_names[i]);
		else if (i == FLOW_SEQ_SHIFT)
			seq_puts(seq, "FLOW_RND ");

		if (i == IPSEC_SHIFT && pkt_dev->spi)
			seq_printf(seq, "spi:%u", pkt_dev->spi);
	}

	seq_puts(seq, "\n");

	/* not really stopped, more like last-running-at */
	stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at;
	idle = pkt_dev->idle_acc;
	do_div(idle, NSEC_PER_USEC);

	seq_printf(seq,
		   "Current:\n     pkts-sofar: %llu  errors: %llu\n",
		   (unsigned long long)pkt_dev->sofar,
		   (unsigned long long)pkt_dev->errors);

	seq_printf(seq,
		   "     started: %lluus  stopped: %lluus idle: %lluus\n",
		   (unsigned long long) ktime_to_us(pkt_dev->started_at),
		   (unsigned long long) ktime_to_us(stopped),
		   (unsigned long long) idle);

	seq_printf(seq,
		   "     seq_num: %d  cur_dst_mac_offset: %d  cur_src_mac_offset: %d\n",
		   pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset,
		   pkt_dev->cur_src_mac_offset);

	if (pkt_dev->flags & F_IPV6) {
		seq_printf(seq, "     cur_saddr: %pI6c  cur_daddr: %pI6c\n",
			   &pkt_dev->cur_in6_saddr,
			   &pkt_dev->cur_in6_daddr);
	} else
		seq_printf(seq, "     cur_saddr: %pI4  cur_daddr: %pI4\n",
			   &pkt_dev->cur_saddr, &pkt_dev->cur_daddr);

	seq_printf(seq, "     cur_udp_dst: %d  cur_udp_src: %d\n",
		   pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);

	seq_printf(seq, "     cur_queue_map: %u\n", pkt_dev->cur_queue_map);

	seq_printf(seq, "     flows: %u\n", pkt_dev->nflows);

	if (pkt_dev->result[0])
		seq_printf(seq, "Result: %s\n", pkt_dev->result);
	else
		seq_puts(seq, "Result: Idle\n");

	return 0;
}
static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
		     __u32 *num)
{
	int i = 0;

	*num = 0;

	for (; i < maxlen; i++) {
		int value;
		char c;

		*num <<= 4;
		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		value = hex_to_bin(c);
		if (value >= 0)
			*num |= value;
		else
			break;
	}
	return i;
}

static int count_trail_chars(const char __user * user_buffer,
			     unsigned int maxlen)
{
	int i;

	for (i = 0; i < maxlen; i++) {
		char c;

		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		switch (c) {
		case '\"':
		case '\n':
		case '\r':
		case '\t':
		case ' ':
		case '=':
			break;
		default:
			goto done;
		}
	}
done:
	return i;
}

static long num_arg(const char __user *user_buffer, unsigned long maxlen,
		    unsigned long *num)
{
	int i;

	*num = 0;

	for (i = 0; i < maxlen; i++) {
		char c;

		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		if ((c >= '0') && (c <= '9')) {
			*num *= 10;
			*num += c - '0';
		} else
			break;
	}
	return i;
}

static int strn_len(const char __user * user_buffer, unsigned int maxlen)
{
	int i;

	for (i = 0; i < maxlen; i++) {
		char c;

		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		switch (c) {
		case '\"':
		case '\n':
		case '\r':
		case '\t':
		case ' ':
			goto done_str;
		default:
			break;
		}
	}
done_str:
	return i;
}
/* Parses a comma-separated list of 8-digit hex MPLS labels. */
static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
{
	unsigned int n = 0;
	char c;
	ssize_t i = 0;
	int len;

	pkt_dev->nr_labels = 0;
	do {
		__u32 tmp;

		len = hex32_arg(&buffer[i], 8, &tmp);
		if (len <= 0)
			return len;
		pkt_dev->labels[n] = htonl(tmp);
		if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM)
			pkt_dev->flags |= F_MPLS_RND;
		i += len;
		if (get_user(c, &buffer[i]))
			return -EFAULT;
		i++;
		n++;
		if (n >= MAX_MPLS_LABELS)
			return -E2BIG;
	} while (c == ',');

	pkt_dev->nr_labels = n;
	return i;
}
static __u32 pktgen_read_flag(const char *f, bool *disable)
{
	__u32 i;

	if (f[0] == '!') {
		*disable = true;
		f++;
	}

	for (i = 0; i < NR_PKT_FLAGS; i++) {
		if (!IS_ENABLED(CONFIG_XFRM) && i == IPSEC_SHIFT)
			continue;

		/* allow only disabling ipv6 flag */
		if (!*disable && i == IPV6_SHIFT)
			continue;

		if (strcmp(f, pkt_flag_names[i]) == 0)
			return 1 << i;
	}

	if (strcmp(f, "FLOW_RND") == 0) {
		*disable = !*disable;
		return F_FLOW_SEQ;
	}

	return 0;
}
static ssize_t pktgen_if_write(struct file *file,
			       const char __user * user_buffer, size_t count,
			       loff_t * offset)
{
	struct seq_file *seq = file->private_data;
	struct pktgen_dev *pkt_dev = seq->private;
	int i, max, len;
	char name[16], valstr[32];
	unsigned long value = 0;
	char *pg_result = NULL;
	int tmp = 0;
	char buf[128];

	pg_result = &(pkt_dev->result[0]);

	if (count < 1) {
		pr_warn("wrong command format\n");
		return -EINVAL;
	}

	max = count;
	tmp = count_trail_chars(user_buffer, max);
	if (tmp < 0) {
		pr_warn("illegal format\n");
		return tmp;
	}
	i = tmp;

	/* Read variable name */

	len = strn_len(&user_buffer[i], sizeof(name) - 1);
	if (len < 0)
		return len;

	memset(name, 0, sizeof(name));
	if (copy_from_user(name, &user_buffer[i], len))
		return -EFAULT;
	i += len;

	max = count - i;
	len = count_trail_chars(&user_buffer[i], max);
	if (len < 0)
		return len;

	i += len;

	/* Read value */

	if (debug) {
		size_t copy = min_t(size_t, count + 1, 1024);
		char *tp = strndup_user(user_buffer, copy);

		if (IS_ERR(tp))
			return PTR_ERR(tp);
		pr_debug("%s,%zu  buffer -:%s:-\n", name, count, tp);
		kfree(tp);
	}

	if (!strcmp(name, "min_pkt_size")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value < 14 + 20 + 8)
			value = 14 + 20 + 8;
		if (value != pkt_dev->min_pkt_size) {
			pkt_dev->min_pkt_size = value;
			pkt_dev->cur_pkt_size = value;
		}
		sprintf(pg_result, "OK: min_pkt_size=%u",
			pkt_dev->min_pkt_size);
		return count;
	}

	if (!strcmp(name, "max_pkt_size")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value < 14 + 20 + 8)
			value = 14 + 20 + 8;
		if (value != pkt_dev->max_pkt_size) {
			pkt_dev->max_pkt_size = value;
			pkt_dev->cur_pkt_size = value;
		}
		sprintf(pg_result, "OK: max_pkt_size=%u",
			pkt_dev->max_pkt_size);
		return count;
	}

	/* Shortcut for min = max */

	if (!strcmp(name, "pkt_size")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value < 14 + 20 + 8)
			value = 14 + 20 + 8;
		if (value != pkt_dev->min_pkt_size) {
			pkt_dev->min_pkt_size = value;
			pkt_dev->max_pkt_size = value;
			pkt_dev->cur_pkt_size = value;
		}
		sprintf(pg_result, "OK: pkt_size=%u", pkt_dev->min_pkt_size);
		return count;
	}

	if (!strcmp(name, "debug")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		debug = value;
		sprintf(pg_result, "OK: debug=%u", debug);
		return count;
	}

	if (!strcmp(name, "frags")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->nfrags = value;
		sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags);
		return count;
	}

	if (!strcmp(name, "delay")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value == 0x7FFFFFFF)
			pkt_dev->delay = ULLONG_MAX;
		else
			pkt_dev->delay = (u64)value;

		sprintf(pg_result, "OK: delay=%llu",
			(unsigned long long) pkt_dev->delay);
		return count;
	}

	if (!strcmp(name, "rate")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (!value)
			return len;
		pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;
		if (debug)
			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);

		sprintf(pg_result, "OK: rate=%lu", value);
		return count;
	}

	if (!strcmp(name, "ratep")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (!value)
			return len;
		pkt_dev->delay = NSEC_PER_SEC/value;
		if (debug)
			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);

		sprintf(pg_result, "OK: ratep=%lu", value);
		return count;
	}
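	/*
	 * Worked example (editorial; assumes "rate" is given in Mbit/s, as
	 * the delay formula above implies): with min_pkt_size=1000 bytes,
	 * "rate 1000" gives delay = 1000 * 8 * NSEC_PER_USEC / 1000 =
	 * 8000 ns between packets, i.e. 125 kpps * 8000 bits = 1 Gbit/s.
	 * "ratep" is packets per second: "ratep 1000000" gives delay =
	 * NSEC_PER_SEC / 1000000 = 1000 ns, one packet every microsecond.
	 */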
	if (!strcmp(name, "udp_src_min")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value != pkt_dev->udp_src_min) {
			pkt_dev->udp_src_min = value;
			pkt_dev->cur_udp_src = value;
		}
		sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min);
		return count;
	}

	if (!strcmp(name, "udp_dst_min")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value != pkt_dev->udp_dst_min) {
			pkt_dev->udp_dst_min = value;
			pkt_dev->cur_udp_dst = value;
		}
		sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min);
		return count;
	}

	if (!strcmp(name, "udp_src_max")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value != pkt_dev->udp_src_max) {
			pkt_dev->udp_src_max = value;
			pkt_dev->cur_udp_src = value;
		}
		sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max);
		return count;
	}

	if (!strcmp(name, "udp_dst_max")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value != pkt_dev->udp_dst_max) {
			pkt_dev->udp_dst_max = value;
			pkt_dev->cur_udp_dst = value;
		}
		sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max);
		return count;
	}

	if (!strcmp(name, "clone_skb")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;
		/* clone_skb is not supported for netif_receive xmit_mode and
		 * IFF_TX_SKB_SHARING not supported by the netdevice
		 */
		if ((value > 0) &&
		    ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
		     !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
			return -ENOTSUPP;
		i += len;
		pkt_dev->clone_skb = value;

		sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb);
		return count;
	}

	if (!strcmp(name, "count")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->count = value;
		sprintf(pg_result, "OK: count=%llu",
			(unsigned long long)pkt_dev->count);
		return count;
	}

	if (!strcmp(name, "src_mac_count")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (pkt_dev->src_mac_count != value) {
			pkt_dev->src_mac_count = value;
			pkt_dev->cur_src_mac_offset = 0;
		}
		sprintf(pg_result, "OK: src_mac_count=%d",
			pkt_dev->src_mac_count);
		return count;
	}

	if (!strcmp(name, "dst_mac_count")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (pkt_dev->dst_mac_count != value) {
			pkt_dev->dst_mac_count = value;
			pkt_dev->cur_dst_mac_offset = 0;
		}
		sprintf(pg_result, "OK: dst_mac_count=%d",
			pkt_dev->dst_mac_count);
		return count;
	}

	if (!strcmp(name, "burst")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value > 1) &&
		    ((pkt_dev->xmit_mode == M_QUEUE_XMIT) ||
		     ((pkt_dev->xmit_mode == M_START_XMIT) &&
		      (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))))
			return -ENOTSUPP;
		pkt_dev->burst = value < 1 ? 1 : value;
		sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
		return count;
	}

	if (!strcmp(name, "node")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (node_possible(value)) {
			pkt_dev->node = value;
			sprintf(pg_result, "OK: node=%d", pkt_dev->node);
			if (pkt_dev->page) {
				put_page(pkt_dev->page);
				pkt_dev->page = NULL;
			}
		} else
			sprintf(pg_result, "ERROR: node not possible");
		return count;
	}

	if (!strcmp(name, "xmit_mode")) {
		char f[32];

		memset(f, 0, 32);
		len = strn_len(&user_buffer[i], sizeof(f) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;
		i += len;

		if (strcmp(f, "start_xmit") == 0) {
			pkt_dev->xmit_mode = M_START_XMIT;
		} else if (strcmp(f, "netif_receive") == 0) {
			/* clone_skb set earlier, not supported in this mode */
			if (pkt_dev->clone_skb > 0)
				return -ENOTSUPP;

			pkt_dev->xmit_mode = M_NETIF_RECEIVE;

			/* make sure new packet is allocated every time
			 * pktgen_xmit() is called
			 */
			pkt_dev->last_ok = 1;

			/* override clone_skb if user passed default value
			 * at module loading time
			 */
			pkt_dev->clone_skb = 0;
		} else if (strcmp(f, "queue_xmit") == 0) {
			pkt_dev->xmit_mode = M_QUEUE_XMIT;
			pkt_dev->last_ok = 1;
		} else {
			sprintf(pg_result,
				"xmit_mode -:%s:- unknown\nAvailable modes: %s",
				f, "start_xmit, netif_receive, queue_xmit\n");
			return count;
		}
		sprintf(pg_result, "OK: xmit_mode=%s", f);
		return count;
	}

	if (!strcmp(name, "flag")) {
		__u32 flag;
		char f[32];
		bool disable = false;

		memset(f, 0, 32);
		len = strn_len(&user_buffer[i], sizeof(f) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;
		i += len;

		flag = pktgen_read_flag(f, &disable);
		if (flag) {
			if (disable)
				pkt_dev->flags &= ~flag;
			else
				pkt_dev->flags |= flag;
		} else {
			sprintf(pg_result,
				"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
				f,
				"IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
				"MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, "
				"MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, "
				"QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, "
				"NO_TIMESTAMP, NODE\n");
			return count;
		}
		sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
		return count;
	}
	if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
		len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->dst_min) != 0) {
			memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min));
			strcpy(pkt_dev->dst_min, buf);
			pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
			pkt_dev->cur_daddr = pkt_dev->daddr_min;
		}
		if (debug)
			pr_debug("dst_min set to: %s\n", pkt_dev->dst_min);
		i += len;
		sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
		return count;
	}

	if (!strcmp(name, "dst_max")) {
		len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->dst_max) != 0) {
			memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max));
			strcpy(pkt_dev->dst_max, buf);
			pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
			pkt_dev->cur_daddr = pkt_dev->daddr_max;
		}
		if (debug)
			pr_debug("dst_max set to: %s\n", pkt_dev->dst_max);
		i += len;
		sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
		return count;
	}

	if (!strcmp(name, "dst6")) {
		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->in6_daddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);

		pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;

		if (debug)
			pr_debug("dst6 set to: %s\n", buf);

		i += len;
		sprintf(pg_result, "OK: dst6=%s", buf);
		return count;
	}

	if (!strcmp(name, "dst6_min")) {
		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->min_in6_daddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);

		pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
		if (debug)
			pr_debug("dst6_min set to: %s\n", buf);

		i += len;
		sprintf(pg_result, "OK: dst6_min=%s", buf);
		return count;
	}

	if (!strcmp(name, "dst6_max")) {
		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->max_in6_daddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);

		if (debug)
			pr_debug("dst6_max set to: %s\n", buf);

		i += len;
		sprintf(pg_result, "OK: dst6_max=%s", buf);
		return count;
	}

	if (!strcmp(name, "src6_min")) {
		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->min_in6_saddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_saddr);

		memcpy(&pkt_dev->min_in6_h, pkt_dev->min_in6_saddr.s6_addr, 8);
		memcpy(&pkt_dev->min_in6_l, pkt_dev->min_in6_saddr.s6_addr + 8, 8);
		pkt_dev->min_in6_h = be64_to_cpu(pkt_dev->min_in6_h);
		pkt_dev->min_in6_l = be64_to_cpu(pkt_dev->min_in6_l);

		pkt_dev->cur_in6_saddr = pkt_dev->min_in6_saddr;
		if (debug)
			pr_debug("src6_min set to: %s\n", buf);

		i += len;
		sprintf(pg_result, "OK: src6_min=%s", buf);
		return count;
	}

	if (!strcmp(name, "src6_max")) {
		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->max_in6_saddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_saddr);

		memcpy(&pkt_dev->max_in6_h, pkt_dev->max_in6_saddr.s6_addr, 8);
		memcpy(&pkt_dev->max_in6_l, pkt_dev->max_in6_saddr.s6_addr + 8, 8);
		pkt_dev->max_in6_h = be64_to_cpu(pkt_dev->max_in6_h);
		pkt_dev->max_in6_l = be64_to_cpu(pkt_dev->max_in6_l);

		if (debug)
			pr_debug("src6_max set to: %s\n", buf);

		i += len;
		sprintf(pg_result, "OK: src6_max=%s", buf);
		return count;
	}

	if (!strcmp(name, "src6")) {
		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->in6_saddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);

		pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;

		if (debug)
			pr_debug("src6 set to: %s\n", buf);

		i += len;
		sprintf(pg_result, "OK: src6=%s", buf);
		return count;
	}
	if (!strcmp(name, "src_min")) {
		len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->src_min) != 0) {
			memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min));
			strcpy(pkt_dev->src_min, buf);
			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
			pkt_dev->cur_saddr = pkt_dev->saddr_min;
		}
		if (debug)
			pr_debug("src_min set to: %s\n", pkt_dev->src_min);
		i += len;
		sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min);
		return count;
	}

	if (!strcmp(name, "src_max")) {
		len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->src_max) != 0) {
			memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max));
			strcpy(pkt_dev->src_max, buf);
			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
			pkt_dev->cur_saddr = pkt_dev->saddr_max;
		}
		if (debug)
			pr_debug("src_max set to: %s\n", pkt_dev->src_max);
		i += len;
		sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max);
		return count;
	}

	if (!strcmp(name, "dst_mac")) {
		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
		if (len < 0)
			return len;

		memset(valstr, 0, sizeof(valstr));
		if (copy_from_user(valstr, &user_buffer[i], len))
			return -EFAULT;

		if (!mac_pton(valstr, pkt_dev->dst_mac))
			return -EINVAL;
		/* Set up Dest MAC */
		ether_addr_copy(&pkt_dev->hh[0], pkt_dev->dst_mac);

		sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac);
		return count;
	}

	if (!strcmp(name, "src_mac")) {
		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
		if (len < 0)
			return len;

		memset(valstr, 0, sizeof(valstr));
		if (copy_from_user(valstr, &user_buffer[i], len))
			return -EFAULT;

		if (!mac_pton(valstr, pkt_dev->src_mac))
			return -EINVAL;
		/* Set up Src MAC */
		ether_addr_copy(&pkt_dev->hh[6], pkt_dev->src_mac);

		sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac);
		return count;
	}
	if (!strcmp(name, "clear_counters")) {
		pktgen_clear_counters(pkt_dev);
		sprintf(pg_result, "OK: Clearing counters.\n");
		return count;
	}

	if (!strcmp(name, "flows")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value > MAX_CFLOWS)
			value = MAX_CFLOWS;

		pkt_dev->cflows = value;
		sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows);
		return count;
	}

	if (!strcmp(name, "spi")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->spi = value;
		sprintf(pg_result, "OK: spi=%u", pkt_dev->spi);
		return count;
	}

	if (!strcmp(name, "flowlen")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->lflow = value;
		sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow);
		return count;
	}

	if (!strcmp(name, "queue_map_min")) {
		len = num_arg(&user_buffer[i], 5, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->queue_map_min = value;
		sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min);
		return count;
	}

	if (!strcmp(name, "queue_map_max")) {
		len = num_arg(&user_buffer[i], 5, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->queue_map_max = value;
		sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max);
		return count;
	}

	if (!strcmp(name, "mpls")) {
		unsigned int n, cnt;

		len = get_labels(&user_buffer[i], pkt_dev);
		if (len < 0)
			return len;
		i += len;
		cnt = sprintf(pg_result, "OK: mpls=");
		for (n = 0; n < pkt_dev->nr_labels; n++)
			cnt += sprintf(pg_result + cnt,
				       "%08x%s", ntohl(pkt_dev->labels[n]),
				       n == pkt_dev->nr_labels-1 ? "" : ",");

		if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) {
			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
			pkt_dev->svlan_id = 0xffff;

			if (debug)
				pr_debug("VLAN/SVLAN auto turned off\n");
		}
		return count;
	}
	if (!strcmp(name, "vlan_id")) {
		len = num_arg(&user_buffer[i], 4, &value);
		if (len < 0)
			return len;

		i += len;
		if (value <= 4095) {
			pkt_dev->vlan_id = value;  /* turn on VLAN */

			if (debug)
				pr_debug("VLAN turned on\n");

			if (debug && pkt_dev->nr_labels)
				pr_debug("MPLS auto turned off\n");

			pkt_dev->nr_labels = 0;    /* turn off MPLS */
			sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
		} else {
			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
			pkt_dev->svlan_id = 0xffff;

			if (debug)
				pr_debug("VLAN/SVLAN turned off\n");
		}
		return count;
	}

	if (!strcmp(name, "vlan_p")) {
		len = num_arg(&user_buffer[i], 1, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) {
			pkt_dev->vlan_p = value;
			sprintf(pg_result, "OK: vlan_p=%u", pkt_dev->vlan_p);
		} else {
			sprintf(pg_result, "ERROR: vlan_p must be 0-7");
		}
		return count;
	}

	if (!strcmp(name, "vlan_cfi")) {
		len = num_arg(&user_buffer[i], 1, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) {
			pkt_dev->vlan_cfi = value;
			sprintf(pg_result, "OK: vlan_cfi=%u", pkt_dev->vlan_cfi);
		} else {
			sprintf(pg_result, "ERROR: vlan_cfi must be 0-1");
		}
		return count;
	}

	if (!strcmp(name, "svlan_id")) {
		len = num_arg(&user_buffer[i], 4, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) {
			pkt_dev->svlan_id = value; /* turn on SVLAN */

			if (debug)
				pr_debug("SVLAN turned on\n");

			if (debug && pkt_dev->nr_labels)
				pr_debug("MPLS auto turned off\n");

			pkt_dev->nr_labels = 0;    /* turn off MPLS */
			sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
		} else {
			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
			pkt_dev->svlan_id = 0xffff;

			if (debug)
				pr_debug("VLAN/SVLAN turned off\n");
		}
		return count;
	}

	if (!strcmp(name, "svlan_p")) {
		len = num_arg(&user_buffer[i], 1, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) {
			pkt_dev->svlan_p = value;
			sprintf(pg_result, "OK: svlan_p=%u", pkt_dev->svlan_p);
		} else {
			sprintf(pg_result, "ERROR: svlan_p must be 0-7");
		}
		return count;
	}

	if (!strcmp(name, "svlan_cfi")) {
		len = num_arg(&user_buffer[i], 1, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) {
			pkt_dev->svlan_cfi = value;
			sprintf(pg_result, "OK: svlan_cfi=%u", pkt_dev->svlan_cfi);
		} else {
			sprintf(pg_result, "ERROR: svlan_cfi must be 0-1");
		}
		return count;
	}

	if (!strcmp(name, "tos")) {
		__u32 tmp_value = 0;
		len = hex32_arg(&user_buffer[i], 2, &tmp_value);
		if (len < 0)
			return len;

		i += len;
		if (len == 2) {
			pkt_dev->tos = tmp_value;
			sprintf(pg_result, "OK: tos=0x%02x", pkt_dev->tos);
		} else {
			sprintf(pg_result, "ERROR: tos must be 00-ff");
		}
		return count;
	}

	if (!strcmp(name, "traffic_class")) {
		__u32 tmp_value = 0;
		len = hex32_arg(&user_buffer[i], 2, &tmp_value);
		if (len < 0)
			return len;

		i += len;
		if (len == 2) {
			pkt_dev->traffic_class = tmp_value;
			sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class);
		} else {
			sprintf(pg_result, "ERROR: traffic_class must be 00-ff");
		}
		return count;
	}

	if (!strcmp(name, "skb_priority")) {
		len = num_arg(&user_buffer[i], 9, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->skb_priority = value;
		sprintf(pg_result, "OK: skb_priority=%i",
			pkt_dev->skb_priority);
		return count;
	}

	sprintf(pkt_dev->result, "No such parameter \"%s\"", name);

	return -EINVAL;
}
static int pktgen_if_open(struct inode *inode, struct file *file)
{
	return single_open(file, pktgen_if_show, PDE_DATA(inode));
}

static const struct file_operations pktgen_if_fops = {
	.open    = pktgen_if_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = pktgen_if_write,
	.release = single_release,
};
static int pktgen_thread_show(struct seq_file *seq, void *v)
{
	struct pktgen_thread *t = seq->private;
	const struct pktgen_dev *pkt_dev;

	seq_puts(seq, "Running: ");

	rcu_read_lock();
	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
		if (pkt_dev->running)
			seq_printf(seq, "%s ", pkt_dev->odevname);

	seq_puts(seq, "\nStopped: ");
	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
		if (!pkt_dev->running)
			seq_printf(seq, "%s ", pkt_dev->odevname);

	if (t->result[0])
		seq_printf(seq, "\nResult: %s\n", t->result);
	else
		seq_puts(seq, "\nResult: NA\n");

	rcu_read_unlock();

	return 0;
}
static ssize_t pktgen_thread_write(struct file *file,
				   const char __user * user_buffer,
				   size_t count, loff_t * offset)
{
	struct seq_file *seq = file->private_data;
	struct pktgen_thread *t = seq->private;
	int i, max, len, ret;
	char name[40];
	char *pg_result;

	if (count < 1) {
		//      sprintf(pg_result, "Wrong command format");
		return -EINVAL;
	}

	max = count;
	len = count_trail_chars(user_buffer, max);
	if (len < 0)
		return len;

	i = len;

	/* Read variable name */

	len = strn_len(&user_buffer[i], sizeof(name) - 1);
	if (len < 0)
		return len;

	memset(name, 0, sizeof(name));
	if (copy_from_user(name, &user_buffer[i], len))
		return -EFAULT;
	i += len;

	max = count - i;
	len = count_trail_chars(&user_buffer[i], max);
	if (len < 0)
		return len;

	i += len;

	if (debug)
		pr_debug("t=%s, count=%lu\n", name, (unsigned long)count);

	if (!t) {
		pr_err("ERROR: No thread\n");
		ret = -EINVAL;
		goto out;
	}

	pg_result = &(t->result[0]);

	if (!strcmp(name, "add_device")) {
		char f[32];
		memset(f, 0, 32);
		len = strn_len(&user_buffer[i], sizeof(f) - 1);
		if (len < 0) {
			ret = len;
			goto out;
		}
		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;
		i += len;
		mutex_lock(&pktgen_thread_lock);
		ret = pktgen_add_device(t, f);
		mutex_unlock(&pktgen_thread_lock);
		if (!ret) {
			ret = count;
			sprintf(pg_result, "OK: add_device=%s", f);
		} else
			sprintf(pg_result, "ERROR: can not add device %s", f);
		goto out;
	}

	if (!strcmp(name, "rem_device_all")) {
		mutex_lock(&pktgen_thread_lock);
		t->control |= T_REMDEVALL;
		mutex_unlock(&pktgen_thread_lock);
		schedule_timeout_interruptible(msecs_to_jiffies(125));	/* Propagate thread->control */
		ret = count;
		sprintf(pg_result, "OK: rem_device_all");
		goto out;
	}

	if (!strcmp(name, "max_before_softirq")) {
		sprintf(pg_result, "OK: Note! max_before_softirq is obsoleted -- Do not use");
		ret = count;
		goto out;
	}

	ret = -EINVAL;
out:
	return ret;
}
static int pktgen_thread_open(struct inode *inode, struct file *file)
{
	return single_open(file, pktgen_thread_show, PDE_DATA(inode));
}

static const struct file_operations pktgen_thread_fops = {
	.open    = pktgen_thread_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = pktgen_thread_write,
	.release = single_release,
};
/* Find a device by name across all threads; with remove, mark it for removal */
static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn,
					      const char *ifname, int remove)
{
	struct pktgen_thread *t;
	struct pktgen_dev *pkt_dev = NULL;
	bool exact = (remove == FIND);

	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
		pkt_dev = pktgen_find_dev(t, ifname, exact);
		if (pkt_dev) {
			if (remove) {
				pkt_dev->removal_mark = 1;
				t->control |= T_REMDEV;
			}
			break;
		}
	}
	return pkt_dev;
}
/*
 * mark a device for removal
 */
static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
{
	struct pktgen_dev *pkt_dev = NULL;
	const int max_tries = 10, msec_per_try = 125;
	int i = 0;

	mutex_lock(&pktgen_thread_lock);
	pr_debug("%s: marking %s for removal\n", __func__, ifname);

	while (1) {

		pkt_dev = __pktgen_NN_threads(pn, ifname, REMOVE);
		if (pkt_dev == NULL)
			break;	/* success */

		mutex_unlock(&pktgen_thread_lock);
		pr_debug("%s: waiting for %s to disappear....\n",
			 __func__, ifname);
		schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
		mutex_lock(&pktgen_thread_lock);

		if (++i >= max_tries) {
			pr_err("%s: timed out after waiting %d msec for device %s to be removed\n",
			       __func__, msec_per_try * i, ifname);
			break;
		}
	}

	mutex_unlock(&pktgen_thread_lock);
}
static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *dev)
{
	struct pktgen_thread *t;

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
		struct pktgen_dev *pkt_dev;

		if_lock(t);
		list_for_each_entry(pkt_dev, &t->if_list, list) {
			if (pkt_dev->odev != dev)
				continue;

			proc_remove(pkt_dev->entry);

			pkt_dev->entry = proc_create_data(dev->name, 0600,
							  pn->proc_dir,
							  &pktgen_if_fops,
							  pkt_dev);
			if (!pkt_dev->entry)
				pr_err("can't move proc entry for '%s'\n",
				       dev->name);
			break;
		}
		if_unlock(t);
	}
	mutex_unlock(&pktgen_thread_lock);
}
static int pktgen_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id);

	if (pn->pktgen_exiting)
		return NOTIFY_DONE;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGENAME:
		pktgen_change_name(pn, dev);
		break;

	case NETDEV_UNREGISTER:
		pktgen_mark_device(pn, dev->name);
		break;
	}

	return NOTIFY_DONE;
}
static struct net_device *pktgen_dev_get_by_name(const struct pktgen_net *pn,
						 struct pktgen_dev *pkt_dev,
						 const char *ifname)
{
	char b[IFNAMSIZ+5];
	int i;

	/* Copy the interface name up to any '@' suffix. */
	for (i = 0; ifname[i] != '@'; i++) {
		if (i == IFNAMSIZ)
			break;

		b[i] = ifname[i];
	}
	b[i] = 0;

	return dev_get_by_name(pn->net, b);
}
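/*
 * Note (editorial, illustrative): the '@' suffix lets several pktgen
 * devices share one real interface, e.g. "add_device eth0@0" and
 * "add_device eth0@1" both resolve to eth0 but keep separate pktgen state.
 */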
/* Associate pktgen_dev with a device. */

static int pktgen_setup_dev(const struct pktgen_net *pn,
			    struct pktgen_dev *pkt_dev, const char *ifname)
{
	struct net_device *odev;
	int err;

	/* Clean old setups */
	if (pkt_dev->odev) {
		dev_put(pkt_dev->odev);
		pkt_dev->odev = NULL;
	}

	odev = pktgen_dev_get_by_name(pn, pkt_dev, ifname);
	if (!odev) {
		pr_err("no such netdevice: \"%s\"\n", ifname);
		return -ENODEV;
	}

	if (odev->type != ARPHRD_ETHER) {
		pr_err("not an ethernet device: \"%s\"\n", ifname);
		err = -EINVAL;
	} else if (!netif_running(odev)) {
		pr_err("device is down: \"%s\"\n", ifname);
		err = -ENETDOWN;
	} else {
		pkt_dev->odev = odev;
		return 0;
	}

	dev_put(odev);
	return err;
}
/* Read pkt_dev from the interface and set up internal pktgen_dev
 * structure to have the right information to create/send packets
 */
static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
{
	int ntxq;

	if (!pkt_dev->odev) {
		pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n");
		sprintf(pkt_dev->result,
			"ERROR: pkt_dev->odev == NULL in setup_inject.\n");
		return;
	}

	/* make sure that we don't pick a non-existing transmit queue */
	ntxq = pkt_dev->odev->real_num_tx_queues;

	if (ntxq <= pkt_dev->queue_map_min) {
		pr_warn("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
			pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
			pkt_dev->odevname);
		pkt_dev->queue_map_min = (ntxq ?: 1) - 1;
	}
	if (pkt_dev->queue_map_max >= ntxq) {
		pr_warn("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
			pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
			pkt_dev->odevname);
		pkt_dev->queue_map_max = (ntxq ?: 1) - 1;
	}

	/* Default to the interface's mac if not explicitly set. */

	if (is_zero_ether_addr(pkt_dev->src_mac))
		ether_addr_copy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr);

	/* Set up Dest MAC */
	ether_addr_copy(&(pkt_dev->hh[0]), pkt_dev->dst_mac);

	if (pkt_dev->flags & F_IPV6) {
		int i, set = 0, err = 1;
		struct inet6_dev *idev;

		if (pkt_dev->min_pkt_size == 0) {
			pkt_dev->min_pkt_size = 14 + sizeof(struct ipv6hdr)
						+ sizeof(struct udphdr)
						+ sizeof(struct pktgen_hdr)
						+ pkt_dev->pkt_overhead;
		}

		for (i = 0; i < sizeof(struct in6_addr); i++)
			if (pkt_dev->cur_in6_saddr.s6_addr[i]) {
				set = 1;
				break;
			}

		if (!set) {

			/*
			 * Use linklevel address if unconfigured.
			 *
			 * use ipv6_get_lladdr if/when it gets exported
			 */

			rcu_read_lock();
			idev = __in6_dev_get(pkt_dev->odev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if ((ifp->scope & IFA_LINK) &&
					    !(ifp->flags & IFA_F_TENTATIVE)) {
						pkt_dev->cur_in6_saddr = ifp->addr;
						err = 0;
						break;
					}
				}
				read_unlock_bh(&idev->lock);
			}
			rcu_read_unlock();
			if (err)
				pr_err("ERROR: IPv6 link address not available\n");
		}
	} else {
		if (pkt_dev->min_pkt_size == 0) {
			pkt_dev->min_pkt_size = 14 + sizeof(struct iphdr)
						+ sizeof(struct udphdr)
						+ sizeof(struct pktgen_hdr)
						+ pkt_dev->pkt_overhead;
		}

		pkt_dev->saddr_min = 0;
		pkt_dev->saddr_max = 0;
		if (strlen(pkt_dev->src_min) == 0) {

			struct in_device *in_dev;

			rcu_read_lock();
			in_dev = __in_dev_get_rcu(pkt_dev->odev);
			if (in_dev) {
				const struct in_ifaddr *ifa;

				ifa = rcu_dereference(in_dev->ifa_list);
				if (ifa) {
					pkt_dev->saddr_min = ifa->ifa_address;
					pkt_dev->saddr_max = pkt_dev->saddr_min;
				}
			}
			rcu_read_unlock();
		} else {
			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
		}

		pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
		pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
	}
	/* Initialize current values. */
	pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
	if (pkt_dev->min_pkt_size > pkt_dev->max_pkt_size)
		pkt_dev->max_pkt_size = pkt_dev->min_pkt_size;

	pkt_dev->cur_dst_mac_offset = 0;
	pkt_dev->cur_src_mac_offset = 0;
	pkt_dev->cur_saddr = pkt_dev->saddr_min;
	pkt_dev->cur_daddr = pkt_dev->daddr_min;
	pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
	pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
	pkt_dev->nflows = 0;
}
static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
{
	ktime_t start_time, end_time;
	s64 remaining;
	struct hrtimer_sleeper t;

	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_set_expires(&t.timer, spin_until);

	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
	if (remaining <= 0)
		goto out;

	start_time = ktime_get();
	if (remaining < 100000) {
		/* for small delays (<100us), just loop until limit is reached */
		do {
			end_time = ktime_get();
		} while (ktime_compare(end_time, spin_until) < 0);
	} else {
		do {
			set_current_state(TASK_INTERRUPTIBLE);
			hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);

			if (likely(t.task))
				schedule();

			hrtimer_cancel(&t.timer);
		} while (t.task && pkt_dev->running && !signal_pending(current));
		__set_current_state(TASK_RUNNING);
	}
	end_time = ktime_get();

	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
out:
	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
	destroy_hrtimer_on_stack(&t.timer);
}
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
{
	pkt_dev->pkt_overhead = 0;
	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
}
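/*
 * Worked example (editorial): with nr_labels = 2 and both vlan_id and
 * svlan_id set (i.e. != 0xffff), the overhead is 2*4 + 4 + 4 = 16 bytes,
 * before any IPSEC header length is added later in get_ipsec_sa().
 */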
static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow)
{
	return !!(pkt_dev->flows[flow].flags & F_INIT);
}

static inline int f_pick(struct pktgen_dev *pkt_dev)
{
	int flow = pkt_dev->curfl;

	if (pkt_dev->flags & F_FLOW_SEQ) {
		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
			/* reset time */
			pkt_dev->flows[flow].count = 0;
			pkt_dev->flows[flow].flags = 0;
			pkt_dev->curfl += 1;
			if (pkt_dev->curfl >= pkt_dev->cflows)
				pkt_dev->curfl = 0; /* reset */
		}
	} else {
		flow = prandom_u32() % pkt_dev->cflows;
		pkt_dev->curfl = flow;

		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
			pkt_dev->flows[flow].count = 0;
			pkt_dev->flows[flow].flags = 0;
		}
	}

	return pkt_dev->curfl;
}
#ifdef CONFIG_XFRM
/* If there was already an IPSEC SA, we keep it as is, else
 * we go look for it ...
 */
#define DUMMY_MARK 0
static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
{
	struct xfrm_state *x = pkt_dev->flows[flow].x;
	struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);

	if (!x) {
		if (pkt_dev->spi) {
			/* We need to find the right SA as quickly as
			 * possible; search with minimum criteria to
			 * achieve this.
			 */
			x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET);
		} else {
			/* slow path: we don't already have the xfrm_state */
			x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 0,
						(xfrm_address_t *)&pkt_dev->cur_daddr,
						(xfrm_address_t *)&pkt_dev->cur_saddr,
						AF_INET,
						pkt_dev->ipsmode,
						pkt_dev->ipsproto, 0);
		}
		if (x) {
			pkt_dev->flows[flow].x = x;
			set_pkt_overhead(pkt_dev);
			pkt_dev->pkt_overhead += x->props.header_len;
		}
	}
}
#endif
static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
{
	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
		pkt_dev->cur_queue_map = smp_processor_id();

	else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
		__u16 t;

		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
			t = prandom_u32() %
				(pkt_dev->queue_map_max -
				 pkt_dev->queue_map_min + 1)
				+ pkt_dev->queue_map_min;
		} else {
			t = pkt_dev->cur_queue_map + 1;
			if (t > pkt_dev->queue_map_max)
				t = pkt_dev->queue_map_min;
		}
		pkt_dev->cur_queue_map = t;
	}
	pkt_dev->cur_queue_map = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues;
}
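/*
 * Example (editorial): with queue_map_min=2 and queue_map_max=5 and no
 * QUEUE_MAP_RND/QUEUE_MAP_CPU flag set, successive packets walk queues
 * 2, 3, 4, 5, 2, ...; the final modulo clamps the result to the device's
 * real_num_tx_queues in case the range was configured too wide.
 */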
/* generate ipv6 source addr */
static void set_src_in6_addr(struct pktgen_dev *pkt_dev)
{
	u64 min6, max6, rand, i;
	struct in6_addr addr6;
	__be64 *t, addr_l;

	min6 = pkt_dev->min_in6_l;
	max6 = pkt_dev->max_in6_l;

	/* only generate a source address within the least significant
	 * 64 bits' range; the most significant 64 bits must be equal
	 */
	if (pkt_dev->max_in6_h != pkt_dev->min_in6_h || min6 >= max6)
		return;

	addr6 = pkt_dev->min_in6_saddr;
	t = (__be64 *)addr6.s6_addr + 1;

	if (pkt_dev->flags & F_IPSRC_RND) {
		do {
			prandom_bytes(&rand, sizeof(rand));
			rand = rand % (max6 - min6) + min6;
			addr_l = cpu_to_be64(rand);
			memcpy(t, &addr_l, 8);
		} while (ipv6_addr_loopback(&addr6) ||
			 ipv6_addr_v4mapped(&addr6) ||
			 ipv6_addr_is_multicast(&addr6));
	} else {
		addr6 = pkt_dev->cur_in6_saddr;
		i = be64_to_cpu(*t);
		if (++i > max6)
			i = min6;
		addr_l = cpu_to_be64(i);
		memcpy(t, &addr_l, 8);
	}

	pkt_dev->cur_in6_saddr = addr6;
}
/* Increment/randomize headers according to flags and current values
 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
 */
static void mod_cur_headers(struct pktgen_dev *pkt_dev)
{
	__u32 imn;
	__u32 imx;
	int flow = 0;

	if (pkt_dev->cflows)
		flow = f_pick(pkt_dev);

	/* Deal with source MAC */
	if (pkt_dev->src_mac_count > 1) {
		__u32 mc;
		__u32 tmp;

		if (pkt_dev->flags & F_MACSRC_RND)
			mc = prandom_u32() % pkt_dev->src_mac_count;
		else {
			mc = pkt_dev->cur_src_mac_offset++;
			if (pkt_dev->cur_src_mac_offset >=
			    pkt_dev->src_mac_count)
				pkt_dev->cur_src_mac_offset = 0;
		}

		tmp = pkt_dev->src_mac[5] + (mc & 0xFF);
		pkt_dev->hh[11] = tmp;
		tmp = (pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[10] = tmp;
		tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[9] = tmp;
		tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[8] = tmp;
		tmp = (pkt_dev->src_mac[1] + (tmp >> 8));
		pkt_dev->hh[7] = tmp;
	}
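	/*
	 * The chain of assignments above implements a multi-byte add:
	 * the 32-bit offset mc is added to the low five bytes of src_mac
	 * with carry propagation into hh[11..7]. Example (editorial): a
	 * src_mac ending in ...:00:FF with mc = 1 yields hh[11] = 0x00
	 * and a carry of 1 into hh[10].
	 */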
	/* Deal with Destination MAC */
	if (pkt_dev->dst_mac_count > 1) {
		__u32 mc;
		__u32 tmp;

		if (pkt_dev->flags & F_MACDST_RND)
			mc = prandom_u32() % pkt_dev->dst_mac_count;

		else {
			mc = pkt_dev->cur_dst_mac_offset++;
			if (pkt_dev->cur_dst_mac_offset >=
			    pkt_dev->dst_mac_count) {
				pkt_dev->cur_dst_mac_offset = 0;
			}
		}

		tmp = pkt_dev->dst_mac[5] + (mc & 0xFF);
		pkt_dev->hh[5] = tmp;
		tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[4] = tmp;
		tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[3] = tmp;
		tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[2] = tmp;
		tmp = (pkt_dev->dst_mac[1] + (tmp >> 8));
		pkt_dev->hh[1] = tmp;
	}

	if (pkt_dev->flags & F_MPLS_RND) {
		unsigned int i;

		for (i = 0; i < pkt_dev->nr_labels; i++)
			if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
				pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
					     ((__force __be32)prandom_u32() &
						      htonl(0x000fffff));
	}

	if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
		pkt_dev->vlan_id = prandom_u32() & (4096 - 1);
	}

	if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
		pkt_dev->svlan_id = prandom_u32() & (4096 - 1);
	}

	if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
		if (pkt_dev->flags & F_UDPSRC_RND)
			pkt_dev->cur_udp_src = prandom_u32() %
				(pkt_dev->udp_src_max - pkt_dev->udp_src_min)
				+ pkt_dev->udp_src_min;

		else {
			pkt_dev->cur_udp_src++;
			if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max)
				pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
		}
	}

	if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
		if (pkt_dev->flags & F_UDPDST_RND) {
			pkt_dev->cur_udp_dst = prandom_u32() %
				(pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)
				+ pkt_dev->udp_dst_min;
		} else {
			pkt_dev->cur_udp_dst++;
			if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
				pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
		}
	}

	if (!(pkt_dev->flags & F_IPV6)) {

		imn = ntohl(pkt_dev->saddr_min);
		imx = ntohl(pkt_dev->saddr_max);
		if (imn < imx) {
			__u32 t;

			if (pkt_dev->flags & F_IPSRC_RND)
				t = prandom_u32() % (imx - imn) + imn;
			else {
				t = ntohl(pkt_dev->cur_saddr);
				t++;
				if (t > imx)
					t = imn;

			}
			pkt_dev->cur_saddr = htonl(t);
		}

		if (pkt_dev->cflows && f_seen(pkt_dev, flow)) {
			pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr;
		} else {
			imn = ntohl(pkt_dev->daddr_min);
			imx = ntohl(pkt_dev->daddr_max);
			if (imn < imx) {
				__u32 t;
				__be32 s;

				if (pkt_dev->flags & F_IPDST_RND) {

					do {
						t = prandom_u32() %
							(imx - imn) + imn;
						s = htonl(t);
					} while (ipv4_is_loopback(s) ||
						 ipv4_is_multicast(s) ||
						 ipv4_is_lbcast(s) ||
						 ipv4_is_zeronet(s) ||
						 ipv4_is_local_multicast(s));
					pkt_dev->cur_daddr = s;
				} else {
					t = ntohl(pkt_dev->cur_daddr);
					t++;
					if (t > imx) {
						t = imn;
					}
					pkt_dev->cur_daddr = htonl(t);
				}
			}
			if (pkt_dev->cflows) {
				pkt_dev->flows[flow].flags |= F_INIT;
				pkt_dev->flows[flow].cur_daddr =
				    pkt_dev->cur_daddr;
#ifdef CONFIG_XFRM
				if (pkt_dev->flags & F_IPSEC)
					get_ipsec_sa(pkt_dev, flow);
#endif
				pkt_dev->nflows++;
			}
		}
	} else {		/* IPv6 */

		set_src_in6_addr(pkt_dev);

		if (!ipv6_addr_any(&pkt_dev->min_in6_daddr)) {
			int i;

			/* Only random destinations yet */

			for (i = 0; i < 4; i++) {
				pkt_dev->cur_in6_daddr.s6_addr32[i] =
				    (((__force __be32)prandom_u32() |
				      pkt_dev->min_in6_daddr.s6_addr32[i]) &
				     pkt_dev->max_in6_daddr.s6_addr32[i]);
			}
		}
	}
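	/*
	 * Note (editorial): the (prandom_u32() | min) & max construction
	 * above biases rather than uniformly bounds each 32-bit word --
	 * bits set in min_in6_daddr are always set and bits clear in
	 * max_in6_daddr are always clear -- so min/max act as set/clear
	 * masks, not a numeric range.
	 */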
	if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
		__u32 t;

		if (pkt_dev->flags & F_TXSIZE_RND) {
			t = prandom_u32() %
				(pkt_dev->max_pkt_size - pkt_dev->min_pkt_size)
				+ pkt_dev->min_pkt_size;
		} else {
			t = pkt_dev->cur_pkt_size + 1;
			if (t > pkt_dev->max_pkt_size)
				t = pkt_dev->min_pkt_size;
		}
		pkt_dev->cur_pkt_size = t;
	}

	set_cur_queue_map(pkt_dev);

	pkt_dev->flows[flow].count++;
}
2590 static u32 pktgen_dst_metrics[RTAX_MAX + 1] = {
2592 [RTAX_HOPLIMIT] = 0x5, /* Set a static hoplimit */
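
/* These static metrics back the phony dst_entry wired up in
 * pktgen_add_device() below: tunnel-mode xfrm reads the hop limit for
 * the outer header from the dst metrics, and a fixed table avoids any
 * per-packet route lookup.
 */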

static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
{
	struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
	int err = 0;
	struct net *net = dev_net(pkt_dev->odev);

	if (!x)
		return 0;
	/* XXX: we don't support tunnel mode for now until
	 * we resolve the dst issue */
	if ((x->props.mode != XFRM_MODE_TRANSPORT) && (pkt_dev->spi == 0))
		return 0;

	/* But when the user specifies a valid SPI, the transformation
	 * supports both transport/tunnel mode + ESP/AH type.
	 */
	if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
		skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;

	rcu_read_lock_bh();
	err = pktgen_xfrm_outer_mode_output(x, skb);
	rcu_read_unlock_bh();
	if (err) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
		goto error;
	}
	err = x->type->output(x, skb);
	if (err) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
		goto error;
	}
	spin_lock_bh(&x->lock);
	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);
error:
	return err;
}

static void free_SAs(struct pktgen_dev *pkt_dev)
{
	if (pkt_dev->cflows) {
		/* let go of the SAs if we have them */
		int i;

		for (i = 0; i < pkt_dev->cflows; i++) {
			struct xfrm_state *x = pkt_dev->flows[i].x;

			if (x) {
				xfrm_state_put(x);
				pkt_dev->flows[i].x = NULL;
			}
		}
	}
}

static int process_ipsec(struct pktgen_dev *pkt_dev,
			 struct sk_buff *skb, __be16 protocol)
{
	if (pkt_dev->flags & F_IPSEC) {
		struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
		int nhead = 0;

		if (x) {
			struct ethhdr *eth;
			struct iphdr *iph;
			int ret;

			nhead = x->props.header_len - skb_headroom(skb);
			if (nhead > 0) {
				ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
				if (ret < 0) {
					pr_err("Error expanding ipsec packet %d\n",
					       ret);
					goto err;
				}
			}

			/* ipsec is not expecting ll header */
			skb_pull(skb, ETH_HLEN);
			ret = pktgen_output_ipsec(skb, pkt_dev);
			if (ret) {
				pr_err("Error creating ipsec packet %d\n", ret);
				goto err;
			}
			/* restore ll */
			eth = skb_push(skb, ETH_HLEN);
			memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
			eth->h_proto = protocol;

			/* Update IPv4 header len as well as checksum value */
			iph = ip_hdr(skb);
			iph->tot_len = htons(skb->len - ETH_HLEN);
			ip_send_check(iph);
		}
	}
	return 1;
err:
	kfree_skb(skb);
	return 0;
}
#endif /* CONFIG_XFRM */

static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
{
	unsigned int i;

	for (i = 0; i < pkt_dev->nr_labels; i++)
		*mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;

	mpls--;
	*mpls |= MPLS_STACK_BOTTOM;
}

static inline __be16 build_tci(unsigned int id, unsigned int cfi,
			       unsigned int prio)
{
	return htons(id | (cfi << 12) | (prio << 13));
}
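
/* 802.1Q TCI layout, matching the shifts above: bits 15..13 carry the
 * priority (PCP), bit 12 the CFI/DEI bit, and bits 11..0 the VLAN ID.
 */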

static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
				int datalen)
{
	struct timespec64 timestamp;
	struct pktgen_hdr *pgh;

	pgh = skb_put(skb, sizeof(*pgh));
	datalen -= sizeof(*pgh);

	if (pkt_dev->nfrags <= 0) {
		skb_put_zero(skb, datalen);
	} else {
		int frags = pkt_dev->nfrags;
		int i, len;
		int frag_len;

		if (frags > MAX_SKB_FRAGS)
			frags = MAX_SKB_FRAGS;
		len = datalen - frags * PAGE_SIZE;
		if (len > 0) {
			skb_put_zero(skb, len);
			datalen = frags * PAGE_SIZE;
		}

		i = 0;
		frag_len = (datalen/frags) < PAGE_SIZE ?
			   (datalen/frags) : PAGE_SIZE;
		while (datalen > 0) {
			if (unlikely(!pkt_dev->page)) {
				int node = numa_node_id();

				if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
					node = pkt_dev->node;
				pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
				if (!pkt_dev->page)
					break;
			}
			get_page(pkt_dev->page);
			skb_frag_set_page(skb, i, pkt_dev->page);
			skb_frag_off_set(&skb_shinfo(skb)->frags[i], 0);
			/* last fragment, fill rest of data */
			if (i == (frags - 1))
				skb_frag_size_set(&skb_shinfo(skb)->frags[i],
						  (datalen < PAGE_SIZE ? datalen : PAGE_SIZE));
			else
				skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len);
			datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
			skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
			skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
			i++;
			skb_shinfo(skb)->nr_frags = i;
		}
	}

	/* Stamp the time, and sequence number,
	 * convert them to network byte order
	 */
	pgh->pgh_magic = htonl(PKTGEN_MAGIC);
	pgh->seq_num = htonl(pkt_dev->seq_num);

	if (pkt_dev->flags & F_NO_TIMESTAMP) {
		pgh->tv_sec = 0;
		pgh->tv_usec = 0;
	} else {
		/*
		 * pgh->tv_sec wraps in y2106 when interpreted as unsigned
		 * as done by wireshark, or y2038 when interpreted as signed.
		 * This is probably harmless, but if anyone wants to improve
		 * it, we could introduce a variant that puts 64-bit nanoseconds
		 * into the respective header bytes.
		 * This would also be slightly faster to read.
		 */
		ktime_get_real_ts64(&timestamp);
		pgh->tv_sec = htonl(timestamp.tv_sec);
		pgh->tv_usec = htonl(timestamp.tv_nsec / NSEC_PER_USEC);
	}
}

static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
					struct pktgen_dev *pkt_dev)
{
	unsigned int extralen = LL_RESERVED_SPACE(dev);
	struct sk_buff *skb = NULL;
	unsigned int size;

	size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
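	/* The size just computed covers the payload, the protocol
	 * overhead already summed into pkt_overhead (MPLS/VLAN/tunnel),
	 * the link-layer reserve, and 64 bytes of slack for the later
	 * header pushes.
	 */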

	if (pkt_dev->flags & F_NODE) {
		int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();

		skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node);
		if (likely(skb)) {
			skb_reserve(skb, NET_SKB_PAD);
			skb->dev = dev;
		}
	} else {
		skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
	}

	/* the caller pre-fetches from skb->data and reserves for the mac hdr */
	if (likely(skb))
		skb_reserve(skb, extralen - 16);

	return skb;
}

static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
					struct pktgen_dev *pkt_dev)
{
	struct sk_buff *skb = NULL;
	__u8 *eth;
	struct udphdr *udph;
	int datalen, iplen;
	struct iphdr *iph;
	__be16 protocol = htons(ETH_P_IP);
	__be32 *mpls;
	__be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
	u16 queue_map;

	if (pkt_dev->nr_labels)
		protocol = htons(ETH_P_MPLS_UC);

	if (pkt_dev->vlan_id != 0xffff)
		protocol = htons(ETH_P_8021Q);

	/* Update any of the values, used when we're incrementing various
	 * fields.
	 */
	mod_cur_headers(pkt_dev);
	queue_map = pkt_dev->cur_queue_map;

	skb = pktgen_alloc_skb(odev, pkt_dev);
	if (!skb) {
		sprintf(pkt_dev->result, "No memory");
		return NULL;
	}

	prefetchw(skb->data);
	skb_reserve(skb, 16);

	/* Reserve for ethernet and IP header */
	eth = skb_push(skb, 14);
	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
	if (pkt_dev->nr_labels)
		mpls_push(mpls, pkt_dev);

	if (pkt_dev->vlan_id != 0xffff) {
		if (pkt_dev->svlan_id != 0xffff) {
			svlan_tci = skb_put(skb, sizeof(__be16));
			*svlan_tci = build_tci(pkt_dev->svlan_id,
					       pkt_dev->svlan_cfi,
					       pkt_dev->svlan_p);
			svlan_encapsulated_proto = skb_put(skb,
							   sizeof(__be16));
			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
		}
		vlan_tci = skb_put(skb, sizeof(__be16));
		*vlan_tci = build_tci(pkt_dev->vlan_id,
				      pkt_dev->vlan_cfi,
				      pkt_dev->vlan_p);
		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
		*vlan_encapsulated_proto = htons(ETH_P_IP);
	}

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);
	iph = skb_put(skb, sizeof(struct iphdr));

	skb_set_transport_header(skb, skb->len);
	udph = skb_put(skb, sizeof(struct udphdr));
	skb_set_queue_mapping(skb, queue_map);
	skb->priority = pkt_dev->skb_priority;

	memcpy(eth, pkt_dev->hh, 12);
	*(__be16 *)&eth[12] = protocol;

	/* Eth + IPh + UDPh + mpls */
	datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
		  pkt_dev->pkt_overhead;
	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr))
		datalen = sizeof(struct pktgen_hdr);

	udph->source = htons(pkt_dev->cur_udp_src);
	udph->dest = htons(pkt_dev->cur_udp_dst);
	udph->len = htons(datalen + 8);	/* DATA + udphdr */
	udph->check = 0;

	iph->ihl = 5;
	iph->version = 4;
	iph->ttl = 32;
	iph->tos = pkt_dev->tos;
	iph->protocol = IPPROTO_UDP;	/* UDP */
	iph->saddr = pkt_dev->cur_saddr;
	iph->daddr = pkt_dev->cur_daddr;
	iph->id = htons(pkt_dev->ip_id);
	pkt_dev->ip_id++;
	iph->frag_off = 0;
	iplen = 20 + 8 + datalen;
	iph->tot_len = htons(iplen);
	ip_send_check(iph);
	skb->protocol = protocol;
	skb->dev = odev;
	skb->pkt_type = PACKET_HOST;

	pktgen_finalize_skb(pkt_dev, skb, datalen);

	if (!(pkt_dev->flags & F_UDPCSUM)) {
		skb->ip_summed = CHECKSUM_NONE;
	} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		udp4_hwcsum(skb, iph->saddr, iph->daddr);
	} else {
		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);

		/* add protocol-dependent pseudo-header */
		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						datalen + 8, IPPROTO_UDP, csum);

		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
	}
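
	/* Per RFC 768, an all-zero UDP checksum means "no checksum was
	 * computed", so a sum that works out to zero is transmitted as
	 * all ones (CSUM_MANGLED_0).
	 */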

#ifdef CONFIG_XFRM
	if (!process_ipsec(pkt_dev, skb, protocol))
		return NULL;
#endif

	return skb;
}

static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
					struct pktgen_dev *pkt_dev)
{
	struct sk_buff *skb = NULL;
	__u8 *eth;
	struct udphdr *udph;
	int datalen, udplen;
	struct ipv6hdr *iph;
	__be16 protocol = htons(ETH_P_IPV6);
	__be32 *mpls;
	__be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
	u16 queue_map;

	if (pkt_dev->nr_labels)
		protocol = htons(ETH_P_MPLS_UC);

	if (pkt_dev->vlan_id != 0xffff)
		protocol = htons(ETH_P_8021Q);

	/* Update any of the values, used when we're incrementing various
	 * fields.
	 */
	mod_cur_headers(pkt_dev);
	queue_map = pkt_dev->cur_queue_map;

	skb = pktgen_alloc_skb(odev, pkt_dev);
	if (!skb) {
		sprintf(pkt_dev->result, "No memory");
		return NULL;
	}

	prefetchw(skb->data);
	skb_reserve(skb, 16);

	/* Reserve for ethernet and IP header */
	eth = skb_push(skb, 14);
	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
	if (pkt_dev->nr_labels)
		mpls_push(mpls, pkt_dev);

	if (pkt_dev->vlan_id != 0xffff) {
		if (pkt_dev->svlan_id != 0xffff) {
			svlan_tci = skb_put(skb, sizeof(__be16));
			*svlan_tci = build_tci(pkt_dev->svlan_id,
					       pkt_dev->svlan_cfi,
					       pkt_dev->svlan_p);
			svlan_encapsulated_proto = skb_put(skb,
							   sizeof(__be16));
			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
		}
		vlan_tci = skb_put(skb, sizeof(__be16));
		*vlan_tci = build_tci(pkt_dev->vlan_id,
				      pkt_dev->vlan_cfi,
				      pkt_dev->vlan_p);
		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
		*vlan_encapsulated_proto = htons(ETH_P_IPV6);
	}

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);
	iph = skb_put(skb, sizeof(struct ipv6hdr));

	skb_set_transport_header(skb, skb->len);
	udph = skb_put(skb, sizeof(struct udphdr));
	skb_set_queue_mapping(skb, queue_map);
	skb->priority = pkt_dev->skb_priority;

	memcpy(eth, pkt_dev->hh, 12);
	*(__be16 *)&eth[12] = protocol;

	/* Eth + IPh + UDPh + mpls */
	datalen = pkt_dev->cur_pkt_size - 14 -
		  sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
		  pkt_dev->pkt_overhead;

	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
		datalen = sizeof(struct pktgen_hdr);
		net_info_ratelimited("increased datalen to %d\n", datalen);
	}

	udplen = datalen + sizeof(struct udphdr);
	udph->source = htons(pkt_dev->cur_udp_src);
	udph->dest = htons(pkt_dev->cur_udp_dst);
	udph->len = htons(udplen);
	udph->check = 0;

	*(__be32 *)iph = htonl(0x60000000);	/* Version + flow */

	if (pkt_dev->traffic_class) {
		/* Version + traffic class + flow (0) */
		*(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20));
	}

	iph->hop_limit = 32;

	iph->payload_len = htons(udplen);
	iph->nexthdr = IPPROTO_UDP;

	iph->daddr = pkt_dev->cur_in6_daddr;
	iph->saddr = pkt_dev->cur_in6_saddr;

	skb->protocol = protocol;
	skb->dev = odev;
	skb->pkt_type = PACKET_HOST;

	pktgen_finalize_skb(pkt_dev, skb, datalen);

	if (!(pkt_dev->flags & F_UDPCSUM)) {
		skb->ip_summed = CHECKSUM_NONE;
	} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
	} else {
		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);

		/* add protocol-dependent pseudo-header */
		udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);

		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
	}
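
	/* Unlike IPv4, UDP over IPv6 must always carry a checksum
	 * (RFC 8200); a computed value of zero is likewise sent as
	 * all ones.
	 */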

	return skb;
}

static struct sk_buff *fill_packet(struct net_device *odev,
				   struct pktgen_dev *pkt_dev)
{
	if (pkt_dev->flags & F_IPV6)
		return fill_packet_ipv6(odev, pkt_dev);
	else
		return fill_packet_ipv4(odev, pkt_dev);
}
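
/* A minimal user-space session exercising the fill/transmit path above,
 * per Documentation/networking/pktgen.txt (device and thread names are
 * illustrative):
 *
 *   echo "add_device eth0" > /proc/net/pktgen/kpktgend_0
 *   echo "count 100000"    > /proc/net/pktgen/eth0
 *   echo "pkt_size 60"     > /proc/net/pktgen/eth0
 *   echo "dst 10.0.0.2"    > /proc/net/pktgen/eth0
 *   echo "dst_mac 00:04:23:08:91:dc" > /proc/net/pktgen/eth0
 *   echo "start" > /proc/net/pktgen/pgctrl
 *   cat /proc/net/pktgen/eth0    # per-device counters and results
 */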

static void pktgen_clear_counters(struct pktgen_dev *pkt_dev)
{
	pkt_dev->seq_num = 1;
	pkt_dev->idle_acc = 0;
	pkt_dev->sofar = 0;
	pkt_dev->tx_bytes = 0;
	pkt_dev->errors = 0;
}

/* Set up structure for sending pkts, clear counters */

static void pktgen_run(struct pktgen_thread *t)
{
	struct pktgen_dev *pkt_dev;
	int started = 0;

	func_enter();

	rcu_read_lock();
	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {

		/*
		 * setup odev and create initial packet.
		 */
		pktgen_setup_inject(pkt_dev);

		if (pkt_dev->odev) {
			pktgen_clear_counters(pkt_dev);
			pkt_dev->skb = NULL;
			pkt_dev->started_at = pkt_dev->next_tx = ktime_get();

			set_pkt_overhead(pkt_dev);

			strcpy(pkt_dev->result, "Starting");
			pkt_dev->running = 1;	/* Cranke yeself! */
			started++;
		} else
			strcpy(pkt_dev->result, "Error starting");
	}
	rcu_read_unlock();
	if (started)
		t->control &= ~(T_STOP);
}

static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn)
{
	struct pktgen_thread *t;

	func_enter();

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list)
		t->control |= T_STOP;

	mutex_unlock(&pktgen_thread_lock);
}

static int thread_is_running(const struct pktgen_thread *t)
{
	const struct pktgen_dev *pkt_dev;
	int res = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
		if (pkt_dev->running) {
			res = 1;
			break;
		}
	rcu_read_unlock();
	return res;
}

static int pktgen_wait_thread_run(struct pktgen_thread *t)
{
	while (thread_is_running(t)) {

		/* note: 't' will still be around even after the unlock/lock
		 * cycle because pktgen_thread threads are only cleared at
		 * the exit of the module
		 */
		mutex_unlock(&pktgen_thread_lock);
		msleep_interruptible(100);
		mutex_lock(&pktgen_thread_lock);

		if (signal_pending(current))
			goto signal;
	}
	return 1;
signal:
	return 0;
}

static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
{
	struct pktgen_thread *t;
	int sig = 1;

	/* prevent from racing with rmmod */
	if (!try_module_get(THIS_MODULE))
		return sig;

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
		sig = pktgen_wait_thread_run(t);
		if (sig == 0)
			break;
	}

	if (sig == 0)
		list_for_each_entry(t, &pn->pktgen_threads, th_list)
			t->control |= (T_STOP);

	mutex_unlock(&pktgen_thread_lock);
	module_put(THIS_MODULE);
	return sig;
}

static void pktgen_run_all_threads(struct pktgen_net *pn)
{
	struct pktgen_thread *t;

	func_enter();

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list)
		t->control |= (T_RUN);

	mutex_unlock(&pktgen_thread_lock);

	/* Propagate thread->control */
	schedule_timeout_interruptible(msecs_to_jiffies(125));

	pktgen_wait_all_threads_run(pn);
}

static void pktgen_reset_all_threads(struct pktgen_net *pn)
{
	struct pktgen_thread *t;

	func_enter();

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list)
		t->control |= (T_REMDEVALL);

	mutex_unlock(&pktgen_thread_lock);

	/* Propagate thread->control */
	schedule_timeout_interruptible(msecs_to_jiffies(125));

	pktgen_wait_all_threads_run(pn);
}

static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
{
	__u64 bps, mbps, pps;
	char *p = pkt_dev->result;
	ktime_t elapsed = ktime_sub(pkt_dev->stopped_at,
				    pkt_dev->started_at);
	ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);

	p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
		     (unsigned long long)ktime_to_us(elapsed),
		     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
		     (unsigned long long)ktime_to_us(idle),
		     (unsigned long long)pkt_dev->sofar,
		     pkt_dev->cur_pkt_size, nr_frags);

	pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC,
			ktime_to_ns(elapsed));

	bps = pps * 8 * pkt_dev->cur_pkt_size;

	mbps = bps;
	do_div(mbps, 1000000);
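
	/* Worked example: 1,000,000 packets over 2 s of elapsed time
	 * give pps = 500,000; at 60-byte packets bps = 240,000,000 and
	 * mbps = 240.
	 */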
	p += sprintf(p, " %llupps %lluMb/sec (%llubps) errors: %llu",
		     (unsigned long long)pps,
		     (unsigned long long)mbps,
		     (unsigned long long)bps,
		     (unsigned long long)pkt_dev->errors);
}

/* Set stopped-at timer, remove from running list, do counters & statistics */
static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
{
	int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;

	if (!pkt_dev->running) {
		pr_warn("interface: %s is already stopped\n",
			pkt_dev->odevname);
		return -EINVAL;
	}

	pkt_dev->running = 0;
	kfree_skb(pkt_dev->skb);
	pkt_dev->skb = NULL;
	pkt_dev->stopped_at = ktime_get();

	show_results(pkt_dev, nr_frags);

	return 0;
}

static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
{
	struct pktgen_dev *pkt_dev, *best = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
		if (!pkt_dev->running)
			continue;
		if (best == NULL)
			best = pkt_dev;
		else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
			best = pkt_dev;
	}
	rcu_read_unlock();

	return best;
}

static void pktgen_stop(struct pktgen_thread *t)
{
	struct pktgen_dev *pkt_dev;

	func_enter();

	rcu_read_lock();

	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
		pktgen_stop_device(pkt_dev);
	}

	rcu_read_unlock();
}

/*
 * one of our devices needs to be removed - find it
 * and remove it
 */
static void pktgen_rem_one_if(struct pktgen_thread *t)
{
	struct list_head *q, *n;
	struct pktgen_dev *cur;

	func_enter();

	list_for_each_safe(q, n, &t->if_list) {
		cur = list_entry(q, struct pktgen_dev, list);

		if (!cur->removal_mark)
			continue;

		kfree_skb(cur->skb);
		cur->skb = NULL;

		pktgen_remove_device(t, cur);

		break;
	}
}

static void pktgen_rem_all_ifs(struct pktgen_thread *t)
{
	struct list_head *q, *n;
	struct pktgen_dev *cur;

	func_enter();

	/* Remove all devices, free mem */

	list_for_each_safe(q, n, &t->if_list) {
		cur = list_entry(q, struct pktgen_dev, list);

		kfree_skb(cur->skb);
		cur->skb = NULL;

		pktgen_remove_device(t, cur);
	}
}

static void pktgen_rem_thread(struct pktgen_thread *t)
{
	/* Remove from the thread list */
	remove_proc_entry(t->tsk->comm, t->net->proc_dir);
}

static void pktgen_resched(struct pktgen_dev *pkt_dev)
{
	ktime_t idle_start = ktime_get();

	schedule();
	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
}

static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
{
	ktime_t idle_start = ktime_get();

	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
		if (signal_pending(current))
			break;

		if (need_resched())
			pktgen_resched(pkt_dev);
		else
			cpu_relax();
	}
	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
}

static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
	unsigned int burst = READ_ONCE(pkt_dev->burst);
	struct net_device *odev = pkt_dev->odev;
	struct netdev_queue *txq;
	struct sk_buff *skb;
	int ret;

	/* If device is offline, then don't send */
	if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
		pktgen_stop_device(pkt_dev);
		return;
	}

	/* This is max DELAY, this has special meaning of
	 * "never transmit"
	 */
	if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
		pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX);
		return;
	}

	/* If no skb or clone count exhausted then get new one */
	if (!pkt_dev->skb || (pkt_dev->last_ok &&
			      ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
		/* build a new pkt */
		kfree_skb(pkt_dev->skb);

		pkt_dev->skb = fill_packet(odev, pkt_dev);
		if (pkt_dev->skb == NULL) {
			pr_err("ERROR: couldn't allocate skb in fill_packet\n");
			schedule();
			pkt_dev->clone_count--;	/* back out increment, OOM */
			return;
		}
		pkt_dev->last_pkt_size = pkt_dev->skb->len;
		pkt_dev->clone_count = 0;	/* reset counter */
	}

	if (pkt_dev->delay && pkt_dev->last_ok)
		spin(pkt_dev, pkt_dev->next_tx);

	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
		skb = pkt_dev->skb;
		skb->protocol = eth_type_trans(skb, skb->dev);
		refcount_add(burst, &skb->users);
		local_bh_disable();
		do {
			ret = netif_receive_skb(skb);
			if (ret == NET_RX_DROP)
				pkt_dev->errors++;
			pkt_dev->sofar++;
			pkt_dev->seq_num++;
			if (refcount_read(&skb->users) != burst) {
				/* skb was queued by rps/rfs or taps,
				 * so cannot reuse this skb
				 */
				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
				/* get out of the loop and wait
				 * until skb is consumed
				 */
				break;
			}
			/* skb was 'freed' by stack, so clean few
			 * bits and reuse it
			 */
			skb_reset_tc(skb);
		} while (--burst > 0);
		goto out; /* Skips xmit_mode M_START_XMIT */
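		/* In this mode the skb's refcount is pre-charged with
		 * 'burst', so each kfree_skb() in the stack only drops a
		 * reference and the same skb can be injected again.
		 * Selected from user space with, e.g.:
		 *
		 *   echo "xmit_mode netif_receive" > /proc/net/pktgen/eth0
		 *   echo "burst 8" > /proc/net/pktgen/eth0
		 */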
	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
		local_bh_disable();
		refcount_inc(&pkt_dev->skb->users);

		ret = dev_queue_xmit(pkt_dev->skb);
		switch (ret) {
		case NET_XMIT_SUCCESS:
			pkt_dev->sofar++;
			pkt_dev->seq_num++;
			pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
			break;
		case NET_XMIT_DROP:
		case NET_XMIT_CN:
			/* These are all valid return codes for a qdisc but
			 * indicate packets are being dropped or will likely
			 * be dropped soon.
			 */
		case NETDEV_TX_BUSY:
			/* qdisc may call dev_hard_start_xmit directly in cases
			 * where no queues exist e.g. loopback device, virtual
			 * devices, etc. In this case we need to handle
			 * NETDEV_TX_ codes.
			 */
			pkt_dev->errors++;
			net_info_ratelimited("%s xmit error: %d\n",
					     pkt_dev->odevname, ret);
			break;
		}
		goto out;
	}

	txq = skb_get_tx_queue(odev, pkt_dev->skb);

	local_bh_disable();

	HARD_TX_LOCK(odev, txq, smp_processor_id());

	if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
		pkt_dev->last_ok = 0;
		goto unlock;
	}
	refcount_add(burst, &pkt_dev->skb->users);

xmit_more:
	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);

	switch (ret) {
	case NETDEV_TX_OK:
		pkt_dev->last_ok = 1;
		pkt_dev->sofar++;
		pkt_dev->seq_num++;
		pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
		if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq))
			goto xmit_more;
		break;
	case NET_XMIT_DROP:
	case NET_XMIT_CN:
		/* skb has been consumed */
		pkt_dev->errors++;
		break;
	default: /* Drivers are not supposed to return other values! */
		net_info_ratelimited("%s xmit error: %d\n",
				     pkt_dev->odevname, ret);
		pkt_dev->errors++;
		/* fall through */
	case NETDEV_TX_BUSY:
		/* Retry it next time */
		refcount_dec(&(pkt_dev->skb->users));
		pkt_dev->last_ok = 0;
	}
	if (unlikely(burst))
		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
unlock:
	HARD_TX_UNLOCK(odev, txq);

out:
	local_bh_enable();

	/* If pkt_dev->count is zero, then run forever */
	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
		pktgen_wait_for_skb(pkt_dev);

		/* Done with this */
		pktgen_stop_device(pkt_dev);
	}
}

/*
 * Main loop of the thread goes here
 */

static int pktgen_thread_worker(void *arg)
{
	struct pktgen_thread *t = arg;
	struct pktgen_dev *pkt_dev = NULL;
	int cpu = t->cpu;

	BUG_ON(smp_processor_id() != cpu);

	init_waitqueue_head(&t->queue);
	complete(&t->start_done);

	pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current));

	set_freezable();

	while (!kthread_should_stop()) {
		pkt_dev = next_to_run(t);

		if (unlikely(!pkt_dev && t->control == 0)) {
			if (t->net->pktgen_exiting)
				break;
			wait_event_interruptible_timeout(t->queue,
							 t->control != 0,
							 HZ / 10);
			try_to_freeze();
			continue;
		}

		if (likely(pkt_dev)) {
			pktgen_xmit(pkt_dev);

			if (need_resched())
				pktgen_resched(pkt_dev);
			else
				cpu_relax();
		}

		if (t->control & T_STOP) {
			pktgen_stop(t);
			t->control &= ~(T_STOP);
		}

		if (t->control & T_RUN) {
			pktgen_run(t);
			t->control &= ~(T_RUN);
		}

		if (t->control & T_REMDEVALL) {
			pktgen_rem_all_ifs(t);
			t->control &= ~(T_REMDEVALL);
		}

		if (t->control & T_REMDEV) {
			pktgen_rem_one_if(t);
			t->control &= ~(T_REMDEV);
		}

		try_to_freeze();
	}

	pr_debug("%s stopping all devices\n", t->tsk->comm);
	pktgen_stop(t);

	pr_debug("%s removing all devices\n", t->tsk->comm);
	pktgen_rem_all_ifs(t);

	pr_debug("%s removing thread\n", t->tsk->comm);
	pktgen_rem_thread(t);

	return 0;
}

static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
					  const char *ifname, bool exact)
{
	struct pktgen_dev *p, *pkt_dev = NULL;
	size_t len = strlen(ifname);

	rcu_read_lock();
	list_for_each_entry_rcu(p, &t->if_list, list)
		if (strncmp(p->odevname, ifname, len) == 0) {
			if (p->odevname[len]) {
				if (exact || p->odevname[len] != '@')
					continue;
			}
			pkt_dev = p;
			break;
		}

	rcu_read_unlock();
	pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
	return pkt_dev;
}

/*
 * Adds a dev at front of if_list.
 */

static int add_dev_to_thread(struct pktgen_thread *t,
			     struct pktgen_dev *pkt_dev)
{
	int rv = 0;

	/* This function cannot be called concurrently, as it's called
	 * under the pktgen_thread_lock mutex, but it can run from
	 * userspace on another CPU than the kthread. The if_lock()
	 * is used here to sync with concurrent instances of
	 * _rem_dev_from_if_list() invoked via kthread, which is also
	 * updating the if_list
	 */
	if_lock(t);

	if (pkt_dev->pg_thread) {
		pr_err("ERROR: already assigned to a thread\n");
		rv = -EBUSY;
		goto out;
	}

	pkt_dev->running = 0;
	pkt_dev->pg_thread = t;
	list_add_rcu(&pkt_dev->list, &t->if_list);

out:
	if_unlock(t);
	return rv;
}

/* Called under thread lock */

static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
{
	struct pktgen_dev *pkt_dev;
	int err;
	int node = cpu_to_node(t->cpu);

	/* We don't allow a device to be on several threads */

	pkt_dev = __pktgen_NN_threads(t->net, ifname, FIND);
	if (pkt_dev) {
		pr_err("ERROR: interface already used\n");
		return -EBUSY;
	}

	pkt_dev = kzalloc_node(sizeof(struct pktgen_dev), GFP_KERNEL, node);
	if (!pkt_dev)
		return -ENOMEM;

	strcpy(pkt_dev->odevname, ifname);
	pkt_dev->flows = vzalloc_node(array_size(MAX_CFLOWS,
						 sizeof(struct flow_state)),
				      node);
	if (pkt_dev->flows == NULL) {
		kfree(pkt_dev);
		return -ENOMEM;
	}

	pkt_dev->removal_mark = 0;
	pkt_dev->nfrags = 0;
	pkt_dev->delay = pg_delay_d;
	pkt_dev->count = pg_count_d;
	pkt_dev->sofar = 0;
	pkt_dev->udp_src_min = 9;	/* sink port */
	pkt_dev->udp_src_max = 9;
	pkt_dev->udp_dst_min = 9;
	pkt_dev->udp_dst_max = 9;
	pkt_dev->vlan_p = 0;
	pkt_dev->vlan_cfi = 0;
	pkt_dev->vlan_id = 0xffff;
	pkt_dev->svlan_p = 0;
	pkt_dev->svlan_cfi = 0;
	pkt_dev->svlan_id = 0xffff;
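	/* 0xffff is the "no VLAN" sentinel: fill_packet_ipv4/ipv6 only
	 * emit 802.1Q/802.1ad tags once vlan_id/svlan_id have been set
	 * to a real (12-bit) value through the proc interface.
	 */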
	pkt_dev->node = NUMA_NO_NODE;

	err = pktgen_setup_dev(t->net, pkt_dev, ifname);
	if (err)
		goto out1;
	if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
		pkt_dev->clone_skb = pg_clone_skb_d;

	pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
					  &pktgen_if_fops, pkt_dev);
	if (!pkt_dev->entry) {
		pr_err("cannot create %s/%s procfs entry\n",
		       PG_PROC_DIR, ifname);
		err = -EINVAL;
		goto out2;
	}
#ifdef CONFIG_XFRM
	pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
	pkt_dev->ipsproto = IPPROTO_ESP;

	/* xfrm tunnel mode needs an additional dst to extract the outer
	 * IP header protocol/ttl/id fields, so create a phony one here
	 * instead of looking up a valid rt, which would definitely hurt
	 * performance in this circumstance.
	 */
	pkt_dev->dstops.family = AF_INET;
	pkt_dev->xdst.u.dst.dev = pkt_dev->odev;
	dst_init_metrics(&pkt_dev->xdst.u.dst, pktgen_dst_metrics, false);
	pkt_dev->xdst.child = &pkt_dev->xdst.u.dst;
	pkt_dev->xdst.u.dst.ops = &pkt_dev->dstops;
#endif

	return add_dev_to_thread(t, pkt_dev);
out2:
	dev_put(pkt_dev->odev);
out1:
#ifdef CONFIG_XFRM
	free_SAs(pkt_dev);
#endif
	vfree(pkt_dev->flows);
	kfree(pkt_dev);
	return err;
}

static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
{
	struct pktgen_thread *t;
	struct proc_dir_entry *pe;
	struct task_struct *p;

	t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL,
			 cpu_to_node(cpu));
	if (!t) {
		pr_err("ERROR: out of memory, can't create new thread\n");
		return -ENOMEM;
	}

	mutex_init(&t->if_lock);
	t->cpu = cpu;

	INIT_LIST_HEAD(&t->if_list);

	list_add_tail(&t->th_list, &pn->pktgen_threads);
	init_completion(&t->start_done);

	p = kthread_create_on_node(pktgen_thread_worker,
				   t,
				   cpu_to_node(cpu),
				   "kpktgend_%d", cpu);
	if (IS_ERR(p)) {
		pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
		list_del(&t->th_list);
		kfree(t);
		return PTR_ERR(p);
	}
	kthread_bind(p, cpu);
	t->tsk = p;

	pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
			      &pktgen_thread_fops, t);
	if (!pe) {
		pr_err("cannot create %s/%s procfs entry\n",
		       PG_PROC_DIR, t->tsk->comm);
		kthread_stop(p);
		list_del(&t->th_list);
		kfree(t);
		return -EINVAL;
	}

	t->net = pn;
	get_task_struct(p);
	wake_up_process(p);
	wait_for_completion(&t->start_done);
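
	/* Waiting here ensures the worker has run far enough to
	 * initialize its wait queue before any proc write can post
	 * control bits to this thread.
	 */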

	return 0;
}

/*
 * Removes a device from the thread's if_list.
 */
static void _rem_dev_from_if_list(struct pktgen_thread *t,
				  struct pktgen_dev *pkt_dev)
{
	struct list_head *q, *n;
	struct pktgen_dev *p;

	if_lock(t);
	list_for_each_safe(q, n, &t->if_list) {
		p = list_entry(q, struct pktgen_dev, list);
		if (p == pkt_dev)
			list_del_rcu(&p->list);
	}
	if_unlock(t);
}

static int pktgen_remove_device(struct pktgen_thread *t,
				struct pktgen_dev *pkt_dev)
{
	pr_debug("remove_device pkt_dev=%p\n", pkt_dev);

	if (pkt_dev->running) {
		pr_warn("WARNING: trying to remove a running interface, stopping it now\n");
		pktgen_stop_device(pkt_dev);
	}

	/* Dis-associate from the interface */

	if (pkt_dev->odev) {
		dev_put(pkt_dev->odev);
		pkt_dev->odev = NULL;
	}

	/* Remove proc before if_list entry, because add_device uses
	 * the list to determine whether the interface already exists;
	 * this avoids a race with proc_create_data()
	 */
	proc_remove(pkt_dev->entry);

	/* And update the thread if_list */
	_rem_dev_from_if_list(t, pkt_dev);

#ifdef CONFIG_XFRM
	free_SAs(pkt_dev);
#endif
	vfree(pkt_dev->flows);
	if (pkt_dev->page)
		put_page(pkt_dev->page);
	kfree_rcu(pkt_dev, rcu);
	return 0;
}

static int __net_init pg_net_init(struct net *net)
{
	struct pktgen_net *pn = net_generic(net, pg_net_id);
	struct proc_dir_entry *pe;
	int cpu, ret = 0;

	pn->net = net;
	INIT_LIST_HEAD(&pn->pktgen_threads);
	pn->pktgen_exiting = false;
	pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
	if (!pn->proc_dir) {
		pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
		return -ENODEV;
	}
	pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_fops);
	if (pe == NULL) {
		pr_err("cannot create %s procfs entry\n", PGCTRL);
		ret = -EINVAL;
		goto remove;
	}

	for_each_online_cpu(cpu) {
		int err;

		err = pktgen_create_thread(cpu, pn);
		if (err)
			pr_warn("Cannot create thread for cpu %d (%d)\n",
				cpu, err);
	}

	if (list_empty(&pn->pktgen_threads)) {
		pr_err("Initialization failed for all threads\n");
		ret = -ENODEV;
		goto remove_entry;
	}

	return 0;

remove_entry:
	remove_proc_entry(PGCTRL, pn->proc_dir);
remove:
	remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
	return ret;
}

static void __net_exit pg_net_exit(struct net *net)
{
	struct pktgen_net *pn = net_generic(net, pg_net_id);
	struct pktgen_thread *t;
	struct list_head *q, *n;
	LIST_HEAD(list);

	/* Stop all interfaces & threads */
	pn->pktgen_exiting = true;

	mutex_lock(&pktgen_thread_lock);
	list_splice_init(&pn->pktgen_threads, &list);
	mutex_unlock(&pktgen_thread_lock);

	list_for_each_safe(q, n, &list) {
		t = list_entry(q, struct pktgen_thread, th_list);
		list_del(&t->th_list);
		kthread_stop(t->tsk);
		put_task_struct(t->tsk);
		kfree(t);
	}

	remove_proc_entry(PGCTRL, pn->proc_dir);
	remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
}

static struct pernet_operations pg_net_ops = {
	.init = pg_net_init,
	.exit = pg_net_exit,
	.id   = &pg_net_id,
	.size = sizeof(struct pktgen_net),
};

static int __init pg_init(void)
{
	int ret = 0;

	pr_info("%s", version);
	ret = register_pernet_subsys(&pg_net_ops);
	if (ret)
		return ret;
	ret = register_netdevice_notifier(&pktgen_notifier_block);
	if (ret)
		unregister_pernet_subsys(&pg_net_ops);

	return ret;
}

static void __exit pg_cleanup(void)
{
	unregister_netdevice_notifier(&pktgen_notifier_block);
	unregister_pernet_subsys(&pg_net_ops);
	/* Don't need rcu_barrier() due to use of kfree_rcu() */
}

module_init(pg_init);
module_exit(pg_cleanup);

MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>");
MODULE_DESCRIPTION("Packet Generator tool");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);
module_param(pg_count_d, int, 0);
MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject");
module_param(pg_delay_d, int, 0);
MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)");
module_param(pg_clone_skb_d, int, 0);
MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Enable debugging of pktgen module");