/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/moduleparam.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h> /* printk() */
20 #include <linux/slab.h> /* kmalloc() */
21 #include <linux/errno.h> /* error codes */
22 #include <linux/types.h> /* size_t */
23 #include <linux/interrupt.h>
25 #include <linux/irq.h>
26 #include <linux/netdevice.h> /* struct device, and other headers */
27 #include <linux/etherdevice.h> /* eth_type_trans */
28 #include <linux/skbuff.h>
29 #include <linux/ioctl.h>
30 #include <linux/cdev.h>
31 #include <linux/hugetlb.h>
32 #include <linux/in6.h>
33 #include <linux/timer.h>
34 #include <linux/hrtimer.h>
35 #include <linux/ktime.h>
37 #include <linux/ctype.h>
39 #include <linux/ipv6.h>
40 #include <linux/tcp.h>
41 #include <linux/net_tstamp.h>
42 #include <linux/ptp_clock_kernel.h>
44 #include <asm/checksum.h>
45 #include <asm/homecache.h>
46 #include <gxio/mpipe.h>
49 /* Default transmit lockup timeout period, in jiffies. */
50 #define TILE_NET_TIMEOUT (5 * HZ)
52 /* The maximum number of distinct channels (idesc.channel is 5 bits). */
53 #define TILE_NET_CHANNELS 32
55 /* Maximum number of idescs to handle per "poll". */
56 #define TILE_NET_BATCH 128
58 /* Maximum number of packets to handle per "poll". */
59 #define TILE_NET_WEIGHT 64
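/* (This is the NAPI weight: tile_net_poll() handles at most this many
 * packets per invocation before yielding back to the stack; see the
 * netif_napi_add() call in tile_net_update().)
 */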
61 /* Number of entries in each iqueue. */
62 #define IQUEUE_ENTRIES 512
64 /* Number of entries in each equeue. */
65 #define EQUEUE_ENTRIES 2048
67 /* Total header bytes per equeue slot. Must be big enough for 2 bytes
68 * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
69 * 60 bytes of actual TCP header. We round up to align to cache lines.
71 #define HEADER_BYTES 128
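/* For example: 2 (NET_IP_ALIGN) + 14 (L2) + 60 (maximum TCP header) = 76
 * bytes, which rounds up to two 64-byte cache lines, hence 128.
 */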
73 /* Maximum completions per cpu per device (must be a power of two).
74 * ISSUE: What is the right number here? If this is too small, then
75 * egress might block waiting for free space in a completions array.
76 * ISSUE: At the least, allocate these only for initialized echannels.
78 #define TILE_NET_MAX_COMPS 64
80 #define MAX_FRAGS (MAX_SKB_FRAGS + 1)
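/* The "+ 1" allows for the linear skb data ("skb->data"), which
 * tile_net_tx_frags() emits as its own fragment ahead of the page frags.
 */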
82 /* The "kinds" of buffer stacks (small/large/jumbo). */
85 /* Size of completions data to allocate.
86 * ISSUE: Probably more than needed since we don't use all the channels.
88 #define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
90 /* Size of NotifRing data to allocate. */
91 #define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
93 /* Timeout to wake the per-device TX timer after we stop the queue.
94 * We don't want the timeout too short (adds overhead, and might end
95 * up causing stop/wake/stop/wake cycles) or too long (affects performance).
96 * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
98 #define TX_TIMER_DELAY_USEC 30
100 /* Timeout to wake the per-cpu egress timer to free completions. */
101 #define EGRESS_TIMER_DELAY_USEC 1000
103 MODULE_AUTHOR("Tilera Corporation");
104 MODULE_LICENSE("GPL");
106 /* A "packet fragment" (a chunk of memory). */
112 /* A single completion. */
113 struct tile_net_comp {
114 /* The "complete_count" when the completion will be complete. */
116 /* The buffer to be freed when the completion is complete. */
120 /* The completions for a given cpu and echannel. */
121 struct tile_net_comps {
122 /* The completions. */
123 struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
124 /* The number of completions used. */
125 unsigned long comp_next;
126 /* The number of completions freed. */
127 unsigned long comp_last;
130 /* The transmit wake timer for a given cpu and echannel. */
131 struct tile_net_tx_wake {
133 struct hrtimer timer;
134 struct net_device *dev;
137 /* Info for a specific cpu. */
138 struct tile_net_info {
141 /* A timer for handling egress completions. */
142 struct hrtimer egress_timer;
143 /* True if "egress_timer" is scheduled. */
144 bool egress_timer_scheduled;
147 gxio_mpipe_iqueue_t iqueue;
148 /* The NAPI struct. */
149 struct napi_struct napi;
150 /* Number of buffers (by kind) which must still be provided. */
151 unsigned int num_needed_buffers[MAX_KINDS];
154 /* True if iqueue is valid. */
159 /* Comps for each egress channel. */
160 struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
161 /* Transmit wake timer for each egress channel. */
162 struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
163 } mpipe[NR_MPIPE_MAX];
166 /* Info for egress on a particular egress channel. */
167 struct tile_net_egress {
169 gxio_mpipe_equeue_t *equeue;
170 /* The headers for TSO. */
171 unsigned char *headers;
174 /* Info for a specific device. */
175 struct tile_net_priv {
176 /* Our network device. */
177 struct net_device *dev;
178 /* The primary link. */
179 gxio_mpipe_link_t link;
180 /* The primary channel, if open, else -1. */
182 /* The "loopify" egress link, if needed. */
183 gxio_mpipe_link_t loopify_link;
184 /* The "loopify" egress channel, if open, else -1. */
186 /* The egress channel (channel or loopify_channel). */
188 /* mPIPE instance, 0 or 1. */
190 /* The timestamp config. */
191 struct hwtstamp_config stamp_cfg;
194 static struct mpipe_data {
195 /* The ingress irq. */
198 /* The "context" for all devices. */
199 gxio_mpipe_context_t context;
201 /* Egress info, indexed by "priv->echannel"
202 * (lazily created as needed).
204 struct tile_net_egress
205 egress_for_echannel[TILE_NET_CHANNELS];
207 /* Devices currently associated with each channel.
208 * NOTE: The array entry can become NULL after ifconfig down, but
209 * we do not free the underlying net_device structures, so it is
210 * safe to use a pointer after reading it from this array.
213 *tile_net_devs_for_channel[TILE_NET_CHANNELS];
215 /* The actual memory allocated for the buffer stacks. */
216 void *buffer_stack_vas[MAX_KINDS];
218 /* The amount of memory allocated for each buffer stack. */
219 size_t buffer_stack_bytes[MAX_KINDS];
221 /* The first buffer stack index
222 * (small = +0, large = +1, jumbo = +2).
224 int first_buffer_stack;
230 /* PTP-specific data. */
231 struct ptp_clock *ptp_clock;
232 struct ptp_clock_info caps;
234 /* Lock for ptp accessors. */
235 struct mutex ptp_lock;
237 } mpipe_data[NR_MPIPE_MAX] = {
238 [0 ... (NR_MPIPE_MAX - 1)] {
240 .first_buffer_stack = -1,
246 /* A mutex for "tile_net_devs_for_channel". */
247 static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
249 /* The per-cpu info. */
250 static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
/* The buffer size enums for each buffer stack.
 * See arch/tile/include/gxio/mpipe.h for the set of possible values.
 * We avoid the "10368" size because it can induce "false chaining"
 * on "cut-through" jumbo packets.
 */
static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
	GXIO_MPIPE_BUFFER_SIZE_128,
	GXIO_MPIPE_BUFFER_SIZE_1664,
	GXIO_MPIPE_BUFFER_SIZE_16384
};
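/* Thus "small" buffers hold 128 bytes, "large" buffers hold 1664 bytes
 * (enough for a standard 1500-byte MTU frame plus the L2 header and
 * NET_IP_ALIGN headroom), and "jumbo" buffers hold 16384 bytes.
 */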
264 /* Text value of tile_net.cpus if passed as a module parameter. */
265 static char *network_cpus_string;
267 /* The actual cpus in "network_cpus". */
268 static struct cpumask network_cpus_map;
270 /* If "tile_net.loopify=LINK" was specified, this is "LINK". */
271 static char *loopify_link_name;
273 /* If "tile_net.custom" was specified, this is true. */
274 static bool custom_flag;
276 /* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
277 static uint jumbo_num;
279 /* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
280 static inline int mpipe_instance(struct net_device *dev)
282 struct tile_net_priv *priv = netdev_priv(dev);
283 return priv->instance;
286 /* The "tile_net.cpus" argument specifies the cpus that are dedicated
287 * to handle ingress packets.
289 * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
290 * m, n, x, y are integer numbers that represent the cpus that can be
291 * neither a dedicated cpu nor a dataplane cpu.
293 static bool network_cpus_init(void)
297 if (network_cpus_string == NULL)
300 rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
302 pr_warn("tile_net.cpus=%s: malformed cpu list\n",
303 network_cpus_string);
	/* Limit the list to cpus that actually exist. */
	cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
310 if (cpumask_empty(&network_cpus_map)) {
311 pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
312 network_cpus_string);
316 pr_info("Linux network CPUs: %*pbl\n",
317 cpumask_pr_args(&network_cpus_map));
321 module_param_named(cpus, network_cpus_string, charp, 0444);
322 MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
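/* For example, booting with "tile_net.cpus=1-3,6-7" dedicates cpus 1, 2,
 * 3, 6 and 7 to network ingress processing.
 */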
324 /* The "tile_net.loopify=LINK" argument causes the named device to
325 * actually use "loop0" for ingress, and "loop1" for egress. This
326 * allows an app to sit between the actual link and linux, passing
327 * (some) packets along to linux, and forwarding (some) packets sent
330 module_param_named(loopify, loopify_link_name, charp, 0444);
331 MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
333 /* The "tile_net.custom" argument causes us to ignore the "conventional"
334 * classifier metadata, in particular, the "l2_offset".
336 module_param_named(custom, custom_flag, bool, 0444);
337 MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
339 /* The "tile_net.jumbo" argument causes us to support "jumbo" packets,
340 * and to allocate the given number of "jumbo" buffers.
342 module_param_named(jumbo, jumbo_num, uint, 0444);
343 MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets");
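/* For example, "tile_net.jumbo=900" provides 900 jumbo (16 KB) buffers
 * and causes each link to be opened with jumbo receive enabled (see
 * GXIO_MPIPE_LINK_RECEIVE_JUMBO in tile_net_link_open() below).
 */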
345 /* Atomically update a statistics field.
346 * Note that on TILE-Gx, this operation is fire-and-forget on the
347 * issuing core (single-cycle dispatch) and takes only a few cycles
348 * longer than a regular store when the request reaches the home cache.
349 * No expensive bus management overhead is required.
351 static void tile_net_stats_add(unsigned long value, unsigned long *field)
353 BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
354 atomic_long_add(value, (atomic_long_t *)field);
357 /* Allocate and push a buffer. */
358 static bool tile_net_provide_buffer(int instance, int kind)
360 struct mpipe_data *md = &mpipe_data[instance];
361 gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
362 size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
363 const unsigned long buffer_alignment = 128;
367 len = sizeof(struct sk_buff **) + buffer_alignment + bs;
368 skb = dev_alloc_skb(len);
372 /* Make room for a back-pointer to 'skb' and guarantee alignment. */
373 skb_reserve(skb, sizeof(struct sk_buff **));
374 skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
376 /* Save a back-pointer to 'skb'. */
377 *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
379 /* Make sure "skb" and the back-pointer have been flushed. */
382 gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
383 (void *)va_to_tile_io_addr(skb->data));
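	/* After the two skb_reserve() calls above, "skb->data" is aligned
	 * to "buffer_alignment" and the pointer-sized word immediately
	 * below it holds the back-pointer:
	 *
	 *	... | struct sk_buff * | packet data ...
	 *	                        ^ skb->data, the address pushed to mPIPE
	 *
	 * mpipe_buf_to_skb() recovers the skb by reading that word.
	 */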
388 /* Convert a raw mpipe buffer to its matching skb pointer. */
389 static struct sk_buff *mpipe_buf_to_skb(void *va)
391 /* Acquire the associated "skb". */
392 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
393 struct sk_buff *skb = *skb_ptr;
396 if (skb->data != va) {
		/* Panic here since there's a reasonable chance
		 * that a corrupt buffer means generic memory
		 * corruption, with unpredictable system effects.
		 */
		panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
		      va, skb, skb->data);
408 static void tile_net_pop_all_buffers(int instance, int stack)
410 struct mpipe_data *md = &mpipe_data[instance];
413 tile_io_addr_t addr =
414 (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
418 dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
422 /* Provide linux buffers to mPIPE. */
423 static void tile_net_provide_needed_buffers(void)
425 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
427 for (instance = 0; instance < NR_MPIPE_MAX &&
428 info->mpipe[instance].has_iqueue; instance++) {
429 for (kind = 0; kind < MAX_KINDS; kind++) {
430 while (info->mpipe[instance].num_needed_buffers[kind]
432 if (!tile_net_provide_buffer(instance, kind)) {
433 pr_notice("Tile %d still needs"
438 info->mpipe[instance].
439 num_needed_buffers[kind]--;
445 /* Get RX timestamp, and store it in the skb. */
446 static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
447 gxio_mpipe_idesc_t *idesc)
449 if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
450 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
451 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
452 shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
453 idesc->time_stamp_ns);
457 /* Get TX timestamp, and store it in the skb. */
458 static void tile_tx_timestamp(struct sk_buff *skb, int instance)
460 struct skb_shared_info *shtx = skb_shinfo(skb);
461 if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
462 struct mpipe_data *md = &mpipe_data[instance];
463 struct skb_shared_hwtstamps shhwtstamps;
466 shtx->tx_flags |= SKBTX_IN_PROGRESS;
467 gxio_mpipe_get_timestamp(&md->context, &ts);
468 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
469 shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
470 skb_tstamp_tx(skb, &shhwtstamps);
474 /* Use ioctl() to enable or disable TX or RX timestamping. */
475 static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
477 struct hwtstamp_config config;
478 struct tile_net_priv *priv = netdev_priv(dev);
480 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
	if (config.flags) /* reserved for future extensions */
		return -EINVAL;
486 switch (config.tx_type) {
487 case HWTSTAMP_TX_OFF:
494 switch (config.rx_filter) {
495 case HWTSTAMP_FILTER_NONE:
497 case HWTSTAMP_FILTER_ALL:
498 case HWTSTAMP_FILTER_SOME:
499 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
500 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
501 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
502 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
503 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
504 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
505 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
506 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
507 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
508 case HWTSTAMP_FILTER_PTP_V2_EVENT:
509 case HWTSTAMP_FILTER_PTP_V2_SYNC:
510 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
511 config.rx_filter = HWTSTAMP_FILTER_ALL;
517 if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
520 priv->stamp_cfg = config;
524 static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq)
526 struct tile_net_priv *priv = netdev_priv(dev);
528 if (copy_to_user(rq->ifr_data, &priv->stamp_cfg,
529 sizeof(priv->stamp_cfg)))
535 static inline bool filter_packet(struct net_device *dev, void *buf)
537 /* Filter packets received before we're up. */
538 if (dev == NULL || !(dev->flags & IFF_UP))
541 /* Filter out packets that aren't for us. */
542 if (!(dev->flags & IFF_PROMISC) &&
543 !is_multicast_ether_addr(buf) &&
544 !ether_addr_equal(dev->dev_addr, buf))
550 static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
551 gxio_mpipe_idesc_t *idesc, unsigned long len)
553 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
554 struct tile_net_priv *priv = netdev_priv(dev);
555 int instance = priv->instance;
557 /* Encode the actual packet length. */
560 skb->protocol = eth_type_trans(skb, dev);
562 /* Acknowledge "good" hardware checksums. */
563 if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
564 skb->ip_summed = CHECKSUM_UNNECESSARY;
566 /* Get RX timestamp from idesc. */
567 tile_rx_timestamp(priv, skb, idesc);
569 napi_gro_receive(&info->mpipe[instance].napi, skb);
572 tile_net_stats_add(1, &dev->stats.rx_packets);
573 tile_net_stats_add(len, &dev->stats.rx_bytes);
575 /* Need a new buffer. */
576 if (idesc->size == buffer_size_enums[0])
577 info->mpipe[instance].num_needed_buffers[0]++;
578 else if (idesc->size == buffer_size_enums[1])
579 info->mpipe[instance].num_needed_buffers[1]++;
581 info->mpipe[instance].num_needed_buffers[2]++;
584 /* Handle a packet. Return true if "processed", false if "filtered". */
585 static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
587 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
588 struct mpipe_data *md = &mpipe_data[instance];
589 struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
596 /* Drop packets for which no buffer was available (which can
597 * happen under heavy load), or for which the me/tr/ce flags
598 * are set (which can happen for jumbo cut-through packets,
599 * or with a customized classifier).
601 if (idesc->be || idesc->me || idesc->tr || idesc->ce) {
603 tile_net_stats_add(1, &dev->stats.rx_errors);
607 /* Get the "l2_offset", if allowed. */
608 l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
610 /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */
611 va = tile_io_addr_to_va((unsigned long)idesc->va);
613 /* Get the actual packet start/length. */
614 buf = va + l2_offset;
615 len = idesc->l2_size - l2_offset;
617 /* Point "va" at the raw buffer. */
620 filter = filter_packet(dev, buf);
623 tile_net_stats_add(1, &dev->stats.rx_dropped);
625 gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
627 struct sk_buff *skb = mpipe_buf_to_skb(va);
629 /* Skip headroom, and any custom header. */
630 skb_reserve(skb, NET_IP_ALIGN + l2_offset);
632 tile_net_receive_skb(dev, skb, idesc, len);
635 gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
639 /* Handle some packets for the current CPU.
641 * This function handles up to TILE_NET_BATCH idescs per call.
643 * ISSUE: Since we do not provide new buffers until this function is
644 * complete, we must initially provide enough buffers for each network
645 * cpu to fill its iqueue and also its batched idescs.
647 * ISSUE: The "rotting packet" race condition occurs if a packet
648 * arrives after the queue appears to be empty, and before the
649 * hypervisor interrupt is re-enabled.
651 static int tile_net_poll(struct napi_struct *napi, int budget)
653 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
654 unsigned int work = 0;
655 gxio_mpipe_idesc_t *idesc;
657 struct mpipe_data *md;
658 struct info_mpipe *info_mpipe =
659 container_of(napi, struct info_mpipe, napi);
664 instance = info_mpipe->instance;
665 while ((n = gxio_mpipe_iqueue_try_peek(
668 for (i = 0; i < n; i++) {
669 if (i == TILE_NET_BATCH)
671 if (tile_net_handle_packet(instance,
673 if (++work >= budget)
679 /* There are no packets left. */
680 napi_complete(&info_mpipe->napi);
682 md = &mpipe_data[instance];
683 /* Re-enable hypervisor interrupts. */
684 gxio_mpipe_enable_notif_ring_interrupt(
685 &md->context, info->mpipe[instance].iqueue.ring);
687 /* HACK: Avoid the "rotting packet" problem. */
688 if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
689 napi_schedule(&info_mpipe->napi);
691 /* ISSUE: Handle completions? */
694 tile_net_provide_needed_buffers();
699 /* Handle an ingress interrupt from an instance on the current cpu. */
700 static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
702 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
	napi_schedule(&info->mpipe[(uint64_t)id].napi);
	return IRQ_HANDLED;
707 /* Free some completions. This must be called with interrupts blocked. */
708 static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
709 struct tile_net_comps *comps,
710 int limit, bool force_update)
713 while (comps->comp_last < comps->comp_next) {
714 unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
715 struct tile_net_comp *comp = &comps->comp_queue[cid];
716 if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
717 force_update || n == 0))
719 dev_kfree_skb_irq(comp->skb);
727 /* Add a completion. This must be called with interrupts blocked.
728 * tile_net_equeue_try_reserve() will have ensured a free completion entry.
730 static void add_comp(gxio_mpipe_equeue_t *equeue,
731 struct tile_net_comps *comps,
732 uint64_t when, struct sk_buff *skb)
	int cid = comps->comp_next % TILE_NET_MAX_COMPS;
	comps->comp_queue[cid].when = when;
	comps->comp_queue[cid].skb = skb;
	comps->comp_next++;
}
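/* Example: with TILE_NET_MAX_COMPS = 64, comp_queue[] is used as a ring.
 * If comp_next is 66 and comp_last is 5, then 61 completions are still
 * outstanding, occupying slots 5..63 and 0..1.  tile_net_free_comps()
 * advances comp_last, freeing each skb once
 * gxio_mpipe_equeue_is_complete() reports its "when" slot has been egressed.
 */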
740 static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
743 struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
744 struct tile_net_priv *priv = netdev_priv(dev);
745 int instance = priv->instance;
746 struct tile_net_tx_wake *tx_wake =
747 &info->mpipe[instance].tx_wake[priv->echannel];
749 hrtimer_start(&tx_wake->timer,
750 ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
751 HRTIMER_MODE_REL_PINNED);
754 static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
756 struct tile_net_tx_wake *tx_wake =
757 container_of(t, struct tile_net_tx_wake, timer);
758 netif_wake_subqueue(tx_wake->dev, tx_wake->tx_queue_idx);
759 return HRTIMER_NORESTART;
762 /* Make sure the egress timer is scheduled. */
763 static void tile_net_schedule_egress_timer(void)
765 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
767 if (!info->egress_timer_scheduled) {
768 hrtimer_start(&info->egress_timer,
769 ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
770 HRTIMER_MODE_REL_PINNED);
771 info->egress_timer_scheduled = true;
775 /* The "function" for "info->egress_timer".
777 * This timer will reschedule itself as long as there are any pending
778 * completions expected for this tile.
780 static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
782 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
783 unsigned long irqflags;
784 bool pending = false;
787 local_irq_save(irqflags);
789 /* The timer is no longer scheduled. */
790 info->egress_timer_scheduled = false;
792 /* Free all possible comps for this tile. */
793 for (instance = 0; instance < NR_MPIPE_MAX &&
794 info->mpipe[instance].has_iqueue; instance++) {
795 for (i = 0; i < TILE_NET_CHANNELS; i++) {
796 struct tile_net_egress *egress =
797 &mpipe_data[instance].egress_for_echannel[i];
798 struct tile_net_comps *comps =
799 info->mpipe[instance].comps_for_echannel[i];
			if (!egress || comps->comp_last >= comps->comp_next)
				continue;
			tile_net_free_comps(egress->equeue, comps, -1, true);
			pending = pending ||
				(comps->comp_last < comps->comp_next);
	/* Reschedule timer if needed. */
	if (pending)
		tile_net_schedule_egress_timer();
812 local_irq_restore(irqflags);
814 return HRTIMER_NORESTART;
817 /* PTP clock operations. */
819 static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
822 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
823 mutex_lock(&md->ptp_lock);
824 if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb))
826 mutex_unlock(&md->ptp_lock);
830 static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
833 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
834 mutex_lock(&md->ptp_lock);
835 if (gxio_mpipe_adjust_timestamp(&md->context, delta))
837 mutex_unlock(&md->ptp_lock);
841 static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
844 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
845 mutex_lock(&md->ptp_lock);
846 if (gxio_mpipe_get_timestamp(&md->context, ts))
848 mutex_unlock(&md->ptp_lock);
852 static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
853 const struct timespec *ts)
856 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
857 mutex_lock(&md->ptp_lock);
858 if (gxio_mpipe_set_timestamp(&md->context, ts))
860 mutex_unlock(&md->ptp_lock);
864 static int ptp_mpipe_enable(struct ptp_clock_info *ptp,
865 struct ptp_clock_request *request, int on)
870 static struct ptp_clock_info ptp_mpipe_caps = {
871 .owner = THIS_MODULE,
872 .name = "mPIPE clock",
873 .max_adj = 999999999,
877 .adjfreq = ptp_mpipe_adjfreq,
878 .adjtime = ptp_mpipe_adjtime,
879 .gettime = ptp_mpipe_gettime,
880 .settime = ptp_mpipe_settime,
881 .enable = ptp_mpipe_enable,
884 /* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
885 static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
890 gxio_mpipe_set_timestamp(&md->context, &ts);
892 mutex_init(&md->ptp_lock);
893 md->caps = ptp_mpipe_caps;
894 md->ptp_clock = ptp_clock_register(&md->caps, NULL);
895 if (IS_ERR(md->ptp_clock))
896 netdev_err(dev, "ptp_clock_register failed %ld\n",
897 PTR_ERR(md->ptp_clock));
900 /* Initialize PTP fields in a new device. */
901 static void init_ptp_dev(struct tile_net_priv *priv)
903 priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
904 priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
907 /* Helper functions for "tile_net_update()". */
908 static void enable_ingress_irq(void *irq)
910 enable_percpu_irq((long)irq, 0);
913 static void disable_ingress_irq(void *irq)
915 disable_percpu_irq((long)irq);
918 /* Helper function for tile_net_open() and tile_net_stop().
919 * Always called under tile_net_devs_for_channel_mutex.
921 static int tile_net_update(struct net_device *dev)
923 static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
924 bool saw_channel = false;
925 int instance = mpipe_instance(dev);
926 struct mpipe_data *md = &mpipe_data[instance];
932 gxio_mpipe_rules_init(&rules, &md->context);
934 for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
935 if (md->tile_net_devs_for_channel[channel] == NULL)
939 gxio_mpipe_rules_begin(&rules, md->first_bucket,
940 md->num_buckets, NULL);
941 gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
943 gxio_mpipe_rules_add_channel(&rules, channel);
946 /* NOTE: This can fail if there is no classifier.
947 * ISSUE: Can anything else cause it to fail?
949 rc = gxio_mpipe_rules_commit(&rules);
951 netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
956 /* Update all cpus, sequentially (to protect "netif_napi_add()").
957 * We use on_each_cpu to handle the IPI mask or unmask.
960 on_each_cpu(disable_ingress_irq,
961 (void *)(long)(md->ingress_irq), 1);
962 for_each_online_cpu(cpu) {
963 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
965 if (!info->mpipe[instance].has_iqueue)
968 if (!info->mpipe[instance].napi_added) {
969 netif_napi_add(dev, &info->mpipe[instance].napi,
970 tile_net_poll, TILE_NET_WEIGHT);
971 info->mpipe[instance].napi_added = true;
973 if (!info->mpipe[instance].napi_enabled) {
974 napi_enable(&info->mpipe[instance].napi);
975 info->mpipe[instance].napi_enabled = true;
978 if (info->mpipe[instance].napi_enabled) {
979 napi_disable(&info->mpipe[instance].napi);
980 info->mpipe[instance].napi_enabled = false;
982 /* FIXME: Drain the iqueue. */
986 on_each_cpu(enable_ingress_irq,
987 (void *)(long)(md->ingress_irq), 1);
	/* HACK: Allow packets to flow in the simulator. */
	if (saw_channel)
		sim_enable_mpipe_links(instance, -1);
996 /* Initialize a buffer stack. */
997 static int create_buffer_stack(struct net_device *dev,
998 int kind, size_t num_buffers)
1000 pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
1001 int instance = mpipe_instance(dev);
1002 struct mpipe_data *md = &mpipe_data[instance];
1003 size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
1004 int stack_idx = md->first_buffer_stack + kind;
	/* Round up to 64KB and then use alloc_pages_exact() so we get the
	 * required 64KB alignment.
	 */
1011 md->buffer_stack_bytes[kind] =
1012 ALIGN(needed, 64 * 1024);
1014 va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
1017 "Could not alloc %zd bytes for buffer stack %d\n",
1018 md->buffer_stack_bytes[kind], kind);
1022 /* Initialize the buffer stack. */
1023 rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
1024 buffer_size_enums[kind], va,
1025 md->buffer_stack_bytes[kind], 0);
1027 netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
1029 free_pages_exact(va, md->buffer_stack_bytes[kind]);
1033 md->buffer_stack_vas[kind] = va;
1035 rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
1039 "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
1044 /* Provide initial buffers. */
1045 for (i = 0; i < num_buffers; i++) {
1046 if (!tile_net_provide_buffer(instance, kind)) {
1047 netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
1055 /* Allocate and initialize mpipe buffer stacks, and register them in
1056 * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes.
1057 * This routine supports tile_net_init_mpipe(), below.
1059 static int init_buffer_stacks(struct net_device *dev,
1060 int network_cpus_count)
1062 int num_kinds = MAX_KINDS - (jumbo_num == 0);
1065 int instance = mpipe_instance(dev);
1066 struct mpipe_data *md = &mpipe_data[instance];
1068 /* Allocate the buffer stacks. */
1069 rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
1072 "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
1076 md->first_buffer_stack = rc;
1078 /* Enough small/large buffers to (normally) avoid buffer errors. */
1080 network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
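	/* For example, with 4 network cpus this is 4 * (512 + 128) = 2560
	 * buffers of each kind.
	 */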
1082 /* Allocate the small memory stack. */
1084 rc = create_buffer_stack(dev, 0, num_buffers);
1086 /* Allocate the large buffer stack. */
1088 rc = create_buffer_stack(dev, 1, num_buffers);
1090 /* Allocate the jumbo buffer stack if needed. */
1091 if (rc >= 0 && jumbo_num != 0)
1092 rc = create_buffer_stack(dev, 2, jumbo_num);
1097 /* Allocate per-cpu resources (memory for completions and idescs).
1098 * This routine supports tile_net_init_mpipe(), below.
1100 static int alloc_percpu_mpipe_resources(struct net_device *dev,
1103 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1105 int instance = mpipe_instance(dev);
1106 struct mpipe_data *md = &mpipe_data[instance];
1110 /* Allocate the "comps". */
1111 order = get_order(COMPS_SIZE);
1112 page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
1114 netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
1118 addr = pfn_to_kaddr(page_to_pfn(page));
1119 memset(addr, 0, COMPS_SIZE);
1120 for (i = 0; i < TILE_NET_CHANNELS; i++)
1121 info->mpipe[instance].comps_for_echannel[i] =
1122 addr + i * sizeof(struct tile_net_comps);
1124 /* If this is a network cpu, create an iqueue. */
1125 if (cpu_isset(cpu, network_cpus_map)) {
1126 order = get_order(NOTIF_RING_SIZE);
1127 page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
1130 "Failed to alloc %zd bytes iqueue memory\n",
1134 addr = pfn_to_kaddr(page_to_pfn(page));
1135 rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
1136 &md->context, ring++, addr,
1137 NOTIF_RING_SIZE, 0);
1140 "gxio_mpipe_iqueue_init failed: %d\n", rc);
1143 info->mpipe[instance].has_iqueue = true;
1149 /* Initialize NotifGroup and buckets.
1150 * This routine supports tile_net_init_mpipe(), below.
1152 static int init_notif_group_and_buckets(struct net_device *dev,
1153 int ring, int network_cpus_count)
1156 int instance = mpipe_instance(dev);
1157 struct mpipe_data *md = &mpipe_data[instance];
1159 /* Allocate one NotifGroup. */
1160 rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
1162 netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
1168 /* Initialize global num_buckets value. */
1169 if (network_cpus_count > 4)
1170 md->num_buckets = 256;
1171 else if (network_cpus_count > 1)
1172 md->num_buckets = 16;
1174 /* Allocate some buckets, and set global first_bucket value. */
1175 rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
1177 netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
1181 md->first_bucket = rc;
1183 /* Init group and buckets. */
1184 rc = gxio_mpipe_init_notif_group_and_buckets(
1185 &md->context, group, ring, network_cpus_count,
1186 md->first_bucket, md->num_buckets,
1187 GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
1189 netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
1190 "mpipe[%d] %d\n", instance, rc);
1197 /* Create an irq and register it, then activate the irq and request
1198 * interrupts on all cores. Note that "ingress_irq" being initialized
1199 * is how we know not to call tile_net_init_mpipe() again.
1200 * This routine supports tile_net_init_mpipe(), below.
1202 static int tile_net_setup_interrupts(struct net_device *dev)
1205 int instance = mpipe_instance(dev);
1206 struct mpipe_data *md = &mpipe_data[instance];
1208 irq = md->ingress_irq;
1210 irq = irq_alloc_hwirq(-1);
1213 "create_irq failed: mpipe[%d] %d\n",
1217 tile_irq_activate(irq, TILE_IRQ_PERCPU);
1219 rc = request_irq(irq, tile_net_handle_ingress_irq,
1220 0, "tile_net", (void *)((uint64_t)instance));
1223 netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
1225 irq_free_hwirq(irq);
1228 md->ingress_irq = irq;
1231 for_each_online_cpu(cpu) {
1232 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1233 if (info->mpipe[instance].has_iqueue) {
1234 gxio_mpipe_request_notif_ring_interrupt(&md->context,
1235 cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
1236 info->mpipe[instance].iqueue.ring);
1243 /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
1244 static void tile_net_init_mpipe_fail(int instance)
1247 struct mpipe_data *md = &mpipe_data[instance];
1249 /* Do cleanups that require the mpipe context first. */
1250 for (kind = 0; kind < MAX_KINDS; kind++) {
1251 if (md->buffer_stack_vas[kind] != NULL) {
1252 tile_net_pop_all_buffers(instance,
1253 md->first_buffer_stack +
1258 /* Destroy mpipe context so the hardware no longer owns any memory. */
1259 gxio_mpipe_destroy(&md->context);
1261 for_each_online_cpu(cpu) {
1262 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1265 info->mpipe[instance].comps_for_echannel[0]),
1266 get_order(COMPS_SIZE));
1267 info->mpipe[instance].comps_for_echannel[0] = NULL;
1268 free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
1269 get_order(NOTIF_RING_SIZE));
1270 info->mpipe[instance].iqueue.idescs = NULL;
1273 for (kind = 0; kind < MAX_KINDS; kind++) {
1274 if (md->buffer_stack_vas[kind] != NULL) {
1275 free_pages_exact(md->buffer_stack_vas[kind],
1276 md->buffer_stack_bytes[kind]);
1277 md->buffer_stack_vas[kind] = NULL;
1281 md->first_buffer_stack = -1;
1282 md->first_bucket = -1;
1285 /* The first time any tilegx network device is opened, we initialize
1286 * the global mpipe state. If this step fails, we fail to open the
1287 * device, but if it succeeds, we never need to do it again, and since
1288 * tile_net can't be unloaded, we never undo it.
1290 * Note that some resources in this path (buffer stack indices,
1291 * bindings from init_buffer_stack, etc.) are hypervisor resources
1292 * that are freed implicitly by gxio_mpipe_destroy().
1294 static int tile_net_init_mpipe(struct net_device *dev)
1298 int first_ring, ring;
1299 int instance = mpipe_instance(dev);
1300 struct mpipe_data *md = &mpipe_data[instance];
1301 int network_cpus_count = cpus_weight(network_cpus_map);
1303 if (!hash_default) {
1304 netdev_err(dev, "Networking requires hash_default!\n");
1308 rc = gxio_mpipe_init(&md->context, instance);
1310 netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
1315 /* Set up the buffer stacks. */
1316 rc = init_buffer_stacks(dev, network_cpus_count);
1320 /* Allocate one NotifRing for each network cpu. */
1321 rc = gxio_mpipe_alloc_notif_rings(&md->context,
1322 network_cpus_count, 0, 0);
1324 netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
1329 /* Init NotifRings per-cpu. */
1332 for_each_online_cpu(cpu) {
1333 rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
1339 /* Initialize NotifGroup and buckets. */
1340 rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
1344 /* Create and enable interrupts. */
1345 rc = tile_net_setup_interrupts(dev);
1349 /* Register PTP clock and set mPIPE timestamp, if configured. */
1350 register_ptp_clock(dev, md);
1355 tile_net_init_mpipe_fail(instance);
1359 /* Create persistent egress info for a given egress channel.
1360 * Note that this may be shared between, say, "gbe0" and "xgbe0".
1361 * ISSUE: Defer header allocation until TSO is actually needed?
1363 static int tile_net_init_egress(struct net_device *dev, int echannel)
1365 static int ering = -1;
1366 struct page *headers_page, *edescs_page, *equeue_page;
1367 gxio_mpipe_edesc_t *edescs;
1368 gxio_mpipe_equeue_t *equeue;
1369 unsigned char *headers;
1370 int headers_order, edescs_order, equeue_order;
1373 int instance = mpipe_instance(dev);
1374 struct mpipe_data *md = &mpipe_data[instance];
1376 /* Only initialize once. */
1377 if (md->egress_for_echannel[echannel].equeue != NULL)
1380 /* Allocate memory for the "headers". */
1381 headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
1382 headers_page = alloc_pages(GFP_KERNEL, headers_order);
1383 if (headers_page == NULL) {
1385 "Could not alloc %zd bytes for TSO headers.\n",
1386 PAGE_SIZE << headers_order);
1389 headers = pfn_to_kaddr(page_to_pfn(headers_page));
1391 /* Allocate memory for the "edescs". */
1392 edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
1393 edescs_order = get_order(edescs_size);
1394 edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
1395 if (edescs_page == NULL) {
1397 "Could not alloc %zd bytes for eDMA ring.\n",
1401 edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
1403 /* Allocate memory for the "equeue". */
1404 equeue_order = get_order(sizeof(*equeue));
1405 equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
1406 if (equeue_page == NULL) {
1408 "Could not alloc %zd bytes for equeue info.\n",
1409 PAGE_SIZE << equeue_order);
1412 equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
1414 /* Allocate an edma ring (using a one entry "free list"). */
1416 rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
1418 netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
1419 "mpipe[%d] %d\n", instance, rc);
1425 /* Initialize the equeue. */
1426 rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
1427 edescs, edescs_size, 0);
1429 netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
1434 /* Don't reuse the ering later. */
1437 if (jumbo_num != 0) {
1438 /* Make sure "jumbo" packets can be egressed safely. */
1439 if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) {
1440 /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */
1441 netdev_warn(dev, "Jumbo packets may not be egressed"
1442 " properly on channel %d\n", echannel);
1447 md->egress_for_echannel[echannel].equeue = equeue;
1448 md->egress_for_echannel[echannel].headers = headers;
1452 __free_pages(equeue_page, equeue_order);
1455 __free_pages(edescs_page, edescs_order);
1458 __free_pages(headers_page, headers_order);
1464 /* Return channel number for a newly-opened link. */
1465 static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
1466 const char *link_name)
1468 int instance = mpipe_instance(dev);
1469 struct mpipe_data *md = &mpipe_data[instance];
1470 int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
1472 netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
1473 link_name, instance, rc);
1476 if (jumbo_num != 0) {
1477 u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO;
1478 rc = gxio_mpipe_link_set_attr(link, attr, 1);
1481 "Cannot receive jumbo packets on '%s'\n",
1483 gxio_mpipe_link_close(link);
1487 rc = gxio_mpipe_link_channel(link);
1488 if (rc < 0 || rc >= TILE_NET_CHANNELS) {
1489 netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
1490 gxio_mpipe_link_close(link);
1496 /* Help the kernel activate the given network interface. */
1497 static int tile_net_open(struct net_device *dev)
1499 struct tile_net_priv *priv = netdev_priv(dev);
1500 int cpu, rc, instance;
1502 mutex_lock(&tile_net_devs_for_channel_mutex);
1504 /* Get the instance info. */
1505 rc = gxio_mpipe_link_instance(dev->name);
1506 if (rc < 0 || rc >= NR_MPIPE_MAX) {
1507 mutex_unlock(&tile_net_devs_for_channel_mutex);
1511 priv->instance = rc;
1513 if (!mpipe_data[rc].context.mmio_fast_base) {
1514 /* Do one-time initialization per instance the first time
1515 * any device is opened.
1517 rc = tile_net_init_mpipe(dev);
1522 /* Determine if this is the "loopify" device. */
1523 if (unlikely((loopify_link_name != NULL) &&
1524 !strcmp(dev->name, loopify_link_name))) {
1525 rc = tile_net_link_open(dev, &priv->link, "loop0");
1529 rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
1532 priv->loopify_channel = rc;
1533 priv->echannel = rc;
1535 rc = tile_net_link_open(dev, &priv->link, dev->name);
1539 priv->echannel = rc;
1542 /* Initialize egress info (if needed). Once ever, per echannel. */
1543 rc = tile_net_init_egress(dev, priv->echannel);
1547 mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;
1549 rc = tile_net_update(dev);
1553 mutex_unlock(&tile_net_devs_for_channel_mutex);
1555 /* Initialize the transmit wake timer for this device for each cpu. */
1556 for_each_online_cpu(cpu) {
1557 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1558 struct tile_net_tx_wake *tx_wake =
1559 &info->mpipe[instance].tx_wake[priv->echannel];
1561 hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
1563 tx_wake->tx_queue_idx = cpu;
1564 tx_wake->timer.function = tile_net_handle_tx_wake_timer;
1568 for_each_online_cpu(cpu)
1569 netif_start_subqueue(dev, cpu);
1570 netif_carrier_on(dev);
1574 if (priv->loopify_channel >= 0) {
1575 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
1576 netdev_warn(dev, "Failed to close loopify link!\n");
1577 priv->loopify_channel = -1;
1579 if (priv->channel >= 0) {
1580 if (gxio_mpipe_link_close(&priv->link) != 0)
1581 netdev_warn(dev, "Failed to close link!\n");
1584 priv->echannel = -1;
1585 mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL;
1586 mutex_unlock(&tile_net_devs_for_channel_mutex);
1588 /* Don't return raw gxio error codes to generic Linux. */
1589 return (rc > -512) ? rc : -EIO;
1592 /* Help the kernel deactivate the given network interface. */
1593 static int tile_net_stop(struct net_device *dev)
1595 struct tile_net_priv *priv = netdev_priv(dev);
1597 int instance = priv->instance;
1598 struct mpipe_data *md = &mpipe_data[instance];
1600 for_each_online_cpu(cpu) {
1601 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1602 struct tile_net_tx_wake *tx_wake =
1603 &info->mpipe[instance].tx_wake[priv->echannel];
1605 hrtimer_cancel(&tx_wake->timer);
1606 netif_stop_subqueue(dev, cpu);
1609 mutex_lock(&tile_net_devs_for_channel_mutex);
1610 md->tile_net_devs_for_channel[priv->channel] = NULL;
1611 (void)tile_net_update(dev);
1612 if (priv->loopify_channel >= 0) {
1613 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
1614 netdev_warn(dev, "Failed to close loopify link!\n");
1615 priv->loopify_channel = -1;
1617 if (priv->channel >= 0) {
1618 if (gxio_mpipe_link_close(&priv->link) != 0)
1619 netdev_warn(dev, "Failed to close link!\n");
1622 priv->echannel = -1;
1623 mutex_unlock(&tile_net_devs_for_channel_mutex);
1628 /* Determine the VA for a fragment. */
1629 static inline void *tile_net_frag_buf(skb_frag_t *f)
1631 unsigned long pfn = page_to_pfn(skb_frag_page(f));
1632 return pfn_to_kaddr(pfn) + f->page_offset;
1635 /* Acquire a completion entry and an egress slot, or if we can't,
1636 * stop the queue and schedule the tx_wake timer.
1638 static s64 tile_net_equeue_try_reserve(struct net_device *dev,
1640 struct tile_net_comps *comps,
1641 gxio_mpipe_equeue_t *equeue,
1644 /* Try to acquire a completion entry. */
1645 if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
1646 tile_net_free_comps(equeue, comps, 32, false) != 0) {
1648 /* Try to acquire an egress slot. */
1649 s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
1653 /* Freeing some completions gives the equeue time to drain. */
1654 tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
1656 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
1661 /* Still nothing; give up and stop the queue for a short while. */
1662 netif_stop_subqueue(dev, tx_queue_idx);
1663 tile_net_schedule_tx_wake_timer(dev, tx_queue_idx);
1667 /* Determine how many edesc's are needed for TSO.
1669 * Sometimes, if "sendfile()" requires copying, we will be called with
1670 * "data" containing the header and payload, with "frags" being empty.
1671 * Sometimes, for example when using NFS over TCP, a single segment can
1672 * span 3 fragments. This requires special care.
1674 static int tso_count_edescs(struct sk_buff *skb)
1676 struct skb_shared_info *sh = skb_shinfo(skb);
1677 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1678 unsigned int data_len = skb->len - sh_len;
1679 unsigned int p_len = sh->gso_size;
1680 long f_id = -1; /* id of the current fragment */
1681 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1682 long f_used = 0; /* bytes used from the current fragment */
1683 long n; /* size of the current piece of payload */
1687 for (segment = 0; segment < sh->gso_segs; segment++) {
1689 unsigned int p_used = 0;
1691 /* One edesc for header and for each piece of the payload. */
1692 for (num_edescs++; p_used < p_len; num_edescs++) {
1694 /* Advance as needed. */
1695 while (f_used >= f_size) {
1697 f_size = skb_frag_size(&sh->frags[f_id]);
1701 /* Use bytes from the current fragment. */
1703 if (n > f_size - f_used)
1704 n = f_size - f_used;
1709 /* The last segment may be less than gso_size. */
1711 if (data_len < p_len)
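/* Example: an skb with gso_size 1400 and 4200 bytes of payload yields
 * 3 segments.  Each segment needs one edesc for its copied header plus
 * one edesc per fragment piece it draws payload from, so a segment whose
 * 1400 bytes straddle two page fragments costs 1 + 2 = 3 edescs.
 */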
1718 /* Prepare modified copies of the skbuff headers. */
1719 static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1722 struct skb_shared_info *sh = skb_shinfo(skb);
1724 struct ipv6hdr *ih6;
1726 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1727 unsigned int data_len = skb->len - sh_len;
1728 unsigned char *data = skb->data;
1729 unsigned int ih_off, th_off, p_len;
1730 unsigned int isum_seed, tsum_seed, seq;
1731 unsigned int uninitialized_var(id);
1733 long f_id = -1; /* id of the current fragment */
1734 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1735 long f_used = 0; /* bytes used from the current fragment */
1736 long n; /* size of the current piece of payload */
1739 /* Locate original headers and compute various lengths. */
1740 is_ipv6 = skb_is_gso_v6(skb);
1742 ih6 = ipv6_hdr(skb);
1743 ih_off = skb_network_offset(skb);
1746 ih_off = skb_network_offset(skb);
1747 isum_seed = ((0xFFFF - ih->check) +
1748 (0xFFFF - ih->tot_len) +
1754 th_off = skb_transport_offset(skb);
1755 p_len = sh->gso_size;
1757 tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
1758 seq = ntohl(th->seq);
1760 /* Prepare all the headers. */
1761 for (segment = 0; segment < sh->gso_segs; segment++) {
1763 unsigned int p_used = 0;
1765 /* Copy to the header memory for this segment. */
1766 buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
1768 memcpy(buf, data, sh_len);
1770 /* Update copied ip header. */
1772 ih6 = (struct ipv6hdr *)(buf + ih_off);
1773 ih6->payload_len = htons(sh_len + p_len - ih_off -
1776 ih = (struct iphdr *)(buf + ih_off);
1777 ih->tot_len = htons(sh_len + p_len - ih_off);
1778 ih->id = htons(id++);
1779 ih->check = csum_long(isum_seed + ih->tot_len +
1783 /* Update copied tcp header. */
1784 th = (struct tcphdr *)(buf + th_off);
1785 th->seq = htonl(seq);
1786 th->check = csum_long(tsum_seed + htons(sh_len + p_len));
1787 if (segment != sh->gso_segs - 1) {
1792 /* Skip past the header. */
1795 /* Skip past the payload. */
1796 while (p_used < p_len) {
1798 /* Advance as needed. */
1799 while (f_used >= f_size) {
1801 f_size = skb_frag_size(&sh->frags[f_id]);
1805 /* Use bytes from the current fragment. */
1807 if (n > f_size - f_used)
1808 n = f_size - f_used;
1817 /* The last segment may be less than gso_size. */
1819 if (data_len < p_len)
1823 /* Flush the headers so they are ready for hardware DMA. */
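/* For example, in the IPv4 case each copied header differs from the
 * original only in a few fields: tot_len becomes sh_len + p_len - ih_off,
 * the IP id increments by one per segment, the TCP sequence number
 * advances by p_len from one segment to the next, and the checksums are
 * patched up from the precomputed isum_seed/tsum_seed values.
 */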
1827 /* Pass all the data to mpipe for egress. */
1828 static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1829 struct sk_buff *skb, unsigned char *headers, s64 slot)
1831 struct skb_shared_info *sh = skb_shinfo(skb);
1832 int instance = mpipe_instance(dev);
1833 struct mpipe_data *md = &mpipe_data[instance];
1834 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1835 unsigned int data_len = skb->len - sh_len;
1836 unsigned int p_len = sh->gso_size;
1837 gxio_mpipe_edesc_t edesc_head = { { 0 } };
1838 gxio_mpipe_edesc_t edesc_body = { { 0 } };
1839 long f_id = -1; /* id of the current fragment */
1840 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1841 long f_used = 0; /* bytes used from the current fragment */
1842 void *f_data = skb->data + sh_len;
1843 long n; /* size of the current piece of payload */
1844 unsigned long tx_packets = 0, tx_bytes = 0;
1845 unsigned int csum_start;
1848 /* Prepare to egress the headers: set up header edesc. */
1849 csum_start = skb_checksum_start_offset(skb);
1850 edesc_head.csum = 1;
1851 edesc_head.csum_start = csum_start;
1852 edesc_head.csum_dest = csum_start + skb->csum_offset;
1853 edesc_head.xfer_size = sh_len;
1855 /* This is only used to specify the TLB. */
1856 edesc_head.stack_idx = md->first_buffer_stack;
1857 edesc_body.stack_idx = md->first_buffer_stack;
1859 /* Egress all the edescs. */
1860 for (segment = 0; segment < sh->gso_segs; segment++) {
1862 unsigned int p_used = 0;
1864 /* Egress the header. */
1865 buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
1867 edesc_head.va = va_to_tile_io_addr(buf);
1868 gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
1871 /* Egress the payload. */
1872 while (p_used < p_len) {
1875 /* Advance as needed. */
1876 while (f_used >= f_size) {
1878 f_size = skb_frag_size(&sh->frags[f_id]);
1879 f_data = tile_net_frag_buf(&sh->frags[f_id]);
1883 va = f_data + f_used;
1885 /* Use bytes from the current fragment. */
1887 if (n > f_size - f_used)
1888 n = f_size - f_used;
1892 /* Egress a piece of the payload. */
1893 edesc_body.va = va_to_tile_io_addr(va);
1894 edesc_body.xfer_size = n;
1895 edesc_body.bound = !(p_used < p_len);
1896 gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
1901 tx_bytes += sh_len + p_len;
1903 /* The last segment may be less than gso_size. */
1905 if (data_len < p_len)
1910 tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
1911 tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
1914 /* Do "TSO" handling for egress.
1916 * Normally drivers set NETIF_F_TSO only to support hardware TSO;
1917 * otherwise the stack uses scatter-gather to implement GSO in software.
 * In our testing, enabling GSO support (via NETIF_F_SG) drops network
1919 * performance down to around 7.5 Gbps on the 10G interfaces, although
1920 * also dropping cpu utilization way down, to under 8%. But
1921 * implementing "TSO" in the driver brings performance back up to line
1922 * rate, while dropping cpu usage even further, to less than 4%. In
1923 * practice, profiling of GSO shows that skb_segment() is what causes
1924 * the performance overheads; we benefit in the driver from using
1925 * preallocated memory to duplicate the TCP/IP headers.
1927 static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1929 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
1930 struct tile_net_priv *priv = netdev_priv(dev);
1931 int channel = priv->echannel;
1932 int instance = priv->instance;
1933 struct mpipe_data *md = &mpipe_data[instance];
1934 struct tile_net_egress *egress = &md->egress_for_echannel[channel];
1935 struct tile_net_comps *comps =
1936 info->mpipe[instance].comps_for_echannel[channel];
1937 gxio_mpipe_equeue_t *equeue = egress->equeue;
1938 unsigned long irqflags;
1942 /* Determine how many mpipe edesc's are needed. */
1943 num_edescs = tso_count_edescs(skb);
1945 local_irq_save(irqflags);
1947 /* Try to acquire a completion entry and an egress slot. */
1948 slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
1949 equeue, num_edescs);
1951 local_irq_restore(irqflags);
1952 return NETDEV_TX_BUSY;
1955 /* Set up copies of header data properly. */
1956 tso_headers_prepare(skb, egress->headers, slot);
1958 /* Actually pass the data to the network hardware. */
1959 tso_egress(dev, equeue, skb, egress->headers, slot);
1961 /* Add a completion record. */
1962 add_comp(equeue, comps, slot + num_edescs - 1, skb);
1964 local_irq_restore(irqflags);
1966 /* Make sure the egress timer is scheduled. */
1967 tile_net_schedule_egress_timer();
1969 return NETDEV_TX_OK;
1972 /* Analyze the body and frags for a transmit request. */
1973 static unsigned int tile_net_tx_frags(struct frag *frags,
1974 struct sk_buff *skb,
1975 void *b_data, unsigned int b_len)
1977 unsigned int i, n = 0;
1979 struct skb_shared_info *sh = skb_shinfo(skb);
1982 frags[n].buf = b_data;
1983 frags[n++].length = b_len;
1986 for (i = 0; i < sh->nr_frags; i++) {
1987 skb_frag_t *f = &sh->frags[i];
1988 frags[n].buf = tile_net_frag_buf(f);
1989 frags[n++].length = skb_frag_size(f);
1995 /* Help the kernel transmit a packet. */
1996 static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1998 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
1999 struct tile_net_priv *priv = netdev_priv(dev);
2000 int instance = priv->instance;
2001 struct mpipe_data *md = &mpipe_data[instance];
2002 struct tile_net_egress *egress =
2003 &md->egress_for_echannel[priv->echannel];
2004 gxio_mpipe_equeue_t *equeue = egress->equeue;
2005 struct tile_net_comps *comps =
2006 info->mpipe[instance].comps_for_echannel[priv->echannel];
2007 unsigned int len = skb->len;
2008 unsigned char *data = skb->data;
2009 unsigned int num_edescs;
2010 struct frag frags[MAX_FRAGS];
2011 gxio_mpipe_edesc_t edescs[MAX_FRAGS];
2012 unsigned long irqflags;
2013 gxio_mpipe_edesc_t edesc = { { 0 } };
2017 if (skb_is_gso(skb))
2018 return tile_net_tx_tso(skb, dev);
2020 num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
2022 /* This is only used to specify the TLB. */
2023 edesc.stack_idx = md->first_buffer_stack;
2025 /* Prepare the edescs. */
2026 for (i = 0; i < num_edescs; i++) {
2027 edesc.xfer_size = frags[i].length;
2028 edesc.va = va_to_tile_io_addr(frags[i].buf);
2032 /* Mark the final edesc. */
2033 edescs[num_edescs - 1].bound = 1;
2035 /* Add checksum info to the initial edesc, if needed. */
2036 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2037 unsigned int csum_start = skb_checksum_start_offset(skb);
2039 edescs[0].csum_start = csum_start;
2040 edescs[0].csum_dest = csum_start + skb->csum_offset;
2043 local_irq_save(irqflags);
2045 /* Try to acquire a completion entry and an egress slot. */
2046 slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
2047 equeue, num_edescs);
2049 local_irq_restore(irqflags);
2050 return NETDEV_TX_BUSY;
2053 for (i = 0; i < num_edescs; i++)
2054 gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
2056 /* Store TX timestamp if needed. */
2057 tile_tx_timestamp(skb, instance);
2059 /* Add a completion record. */
2060 add_comp(equeue, comps, slot - 1, skb);
2062 /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
2063 tile_net_stats_add(1, &dev->stats.tx_packets);
2064 tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
2065 &dev->stats.tx_bytes);
2067 local_irq_restore(irqflags);
2069 /* Make sure the egress timer is scheduled. */
2070 tile_net_schedule_egress_timer();
2072 return NETDEV_TX_OK;
2075 /* Return subqueue id on this core (one per core). */
2076 static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
2077 void *accel_priv, select_queue_fallback_t fallback)
2079 return smp_processor_id();
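/* (The device is created with NR_CPUS TX queues, and tile_net_open()
 * starts one subqueue per online cpu, so the queue index is the cpu id.)
 */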
2082 /* Deal with a transmit timeout. */
2083 static void tile_net_tx_timeout(struct net_device *dev)
2087 for_each_online_cpu(cpu)
2088 netif_wake_subqueue(dev, cpu);
2091 /* Ioctl commands. */
2092 static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2094 if (cmd == SIOCSHWTSTAMP)
2095 return tile_hwtstamp_set(dev, rq);
2096 if (cmd == SIOCGHWTSTAMP)
2097 return tile_hwtstamp_get(dev, rq);
2102 /* Change the MTU. */
2103 static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
	if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
		return -EINVAL;
2113 /* Change the Ethernet address of the NIC.
2115 * The hypervisor driver does not support changing MAC address. However,
2116 * the hardware does not do anything with the MAC address, so the address
2117 * which gets used on outgoing packets, and which is accepted on incoming
2118 * packets, is completely up to us.
2120 * Returns 0 on success, negative on failure.
2122 static int tile_net_set_mac_address(struct net_device *dev, void *p)
2124 struct sockaddr *addr = p;
2126 if (!is_valid_ether_addr(addr->sa_data))
2128 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2132 #ifdef CONFIG_NET_POLL_CONTROLLER
2133 /* Polling 'interrupt' - used by things like netconsole to send skbs
2134 * without having to re-enable interrupts. It's not called while
2135 * the interrupt routine is executing.
2137 static void tile_net_netpoll(struct net_device *dev)
2139 int instance = mpipe_instance(dev);
2140 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
2141 struct mpipe_data *md = &mpipe_data[instance];
2143 disable_percpu_irq(md->ingress_irq);
2144 napi_schedule(&info->mpipe[instance].napi);
2145 enable_percpu_irq(md->ingress_irq, 0);
2149 static const struct net_device_ops tile_net_ops = {
2150 .ndo_open = tile_net_open,
2151 .ndo_stop = tile_net_stop,
2152 .ndo_start_xmit = tile_net_tx,
2153 .ndo_select_queue = tile_net_select_queue,
2154 .ndo_do_ioctl = tile_net_ioctl,
2155 .ndo_change_mtu = tile_net_change_mtu,
2156 .ndo_tx_timeout = tile_net_tx_timeout,
2157 .ndo_set_mac_address = tile_net_set_mac_address,
2158 #ifdef CONFIG_NET_POLL_CONTROLLER
2159 .ndo_poll_controller = tile_net_netpoll,
2163 /* The setup function.
2165 * This uses ether_setup() to assign various fields in dev, including
2166 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
2168 static void tile_net_setup(struct net_device *dev)
2170 netdev_features_t features = 0;
2173 dev->netdev_ops = &tile_net_ops;
2174 dev->watchdog_timeo = TILE_NET_TIMEOUT;
2177 features |= NETIF_F_HW_CSUM;
2178 features |= NETIF_F_SG;
2179 features |= NETIF_F_TSO;
2180 features |= NETIF_F_TSO6;
2182 dev->hw_features |= features;
2183 dev->vlan_features |= features;
2184 dev->features |= features;
2187 /* Allocate the device structure, register the device, and obtain the
2188 * MAC address from the hypervisor.
2190 static void tile_net_dev_init(const char *name, const uint8_t *mac)
2193 struct net_device *dev;
2194 struct tile_net_priv *priv;
2196 /* HACK: Ignore "loop" links. */
2197 if (strncmp(name, "loop", 4) == 0)
2200 /* Allocate the device structure. Normally, "name" is a
2201 * template, instantiated by register_netdev(), but not for us.
2203 dev = alloc_netdev_mqs(sizeof(*priv), name, NET_NAME_UNKNOWN,
2204 tile_net_setup, NR_CPUS, 1);
2206 pr_err("alloc_netdev_mqs(%s) failed\n", name);
2210 /* Initialize "priv". */
2211 priv = netdev_priv(dev);
2214 priv->loopify_channel = -1;
2215 priv->echannel = -1;
2218 /* Get the MAC address and set it in the device struct; this must
2219 * be done before the device is opened. If the MAC is all zeroes,
2220 * we use a random address, since we're probably on the simulator.
2222 if (!is_zero_ether_addr(mac))
2223 ether_addr_copy(dev->dev_addr, mac);
2225 eth_hw_addr_random(dev);
2227 /* Register the network device. */
2228 ret = register_netdev(dev);
2230 netdev_err(dev, "register_netdev failed %d\n", ret);
2236 /* Per-cpu module initialization. */
2237 static void tile_net_init_module_percpu(void *unused)
2239 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
2240 int my_cpu = smp_processor_id();
2243 for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
2244 info->mpipe[instance].has_iqueue = false;
2245 info->mpipe[instance].instance = instance;
2247 info->my_cpu = my_cpu;
2249 /* Initialize the egress timer. */
2250 hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2251 info->egress_timer.function = tile_net_handle_egress_timer;
2254 /* Module initialization. */
2255 static int __init tile_net_init_module(void)
2258 char name[GXIO_MPIPE_LINK_NAME_LEN];
2261 pr_info("Tilera Network Driver\n");
2263 BUILD_BUG_ON(NR_MPIPE_MAX != 2);
2265 mutex_init(&tile_net_devs_for_channel_mutex);
2267 /* Initialize each CPU. */
2268 on_each_cpu(tile_net_init_module_percpu, NULL, 1);
2270 /* Find out what devices we have, and initialize them. */
2271 for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
2272 tile_net_dev_init(name, mac);
2274 if (!network_cpus_init())
2275 network_cpus_map = *cpu_online_mask;
2280 module_init(tile_net_init_module);