2 * Virtual network driver for conversing with remote driver backends.
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation; or, when distributed
10 * separately from the Linux kernel or incorporated into other
11 * software packages, subject to the following license:
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 #include <linux/module.h>
33 #include <linux/kernel.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/ethtool.h>
38 #include <linux/if_ether.h>
39 #include <linux/tcp.h>
40 #include <linux/udp.h>
41 #include <linux/moduleparam.h>
43 #include <linux/slab.h>
47 #include <xen/xenbus.h>
48 #include <xen/events.h>
50 #include <xen/platform_pci.h>
51 #include <xen/grant_table.h>
53 #include <xen/interface/io/netif.h>
54 #include <xen/interface/memory.h>
55 #include <xen/interface/grant_table.h>
57 static const struct ethtool_ops xennet_ethtool_ops;
64 #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
66 #define RX_COPY_THRESHOLD 256
68 #define GRANT_INVALID_REF 0
70 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
71 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
72 #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
74 struct netfront_stats {
79 struct u64_stats_sync syncp;
82 struct netfront_info {
83 struct list_head list;
84 struct net_device *netdev;
86 struct napi_struct napi;
89 struct xenbus_device *xbdev;
92 struct xen_netif_tx_front_ring tx;
96 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
97 * are linked from tx_skb_freelist through skb_entry.link.
99 * NB. Freelist index entries are always going to be less than
100 * PAGE_OFFSET, whereas pointers to skbs will always be equal to or
101 * greater than PAGE_OFFSET: we use this property to distinguish
107 } tx_skbs[NET_TX_RING_SIZE];
108 grant_ref_t gref_tx_head;
109 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
110 unsigned tx_skb_freelist;
112 spinlock_t rx_lock ____cacheline_aligned_in_smp;
113 struct xen_netif_rx_front_ring rx;
116 /* Receive-ring batched refills. */
117 #define RX_MIN_TARGET 8
118 #define RX_DFL_MIN_TARGET 64
119 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
120 unsigned rx_min_target, rx_max_target, rx_target;
121 struct sk_buff_head rx_batch;
123 struct timer_list rx_refill_timer;
125 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
126 grant_ref_t gref_rx_head;
127 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
129 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
130 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
131 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
134 struct netfront_stats __percpu *stats;
136 unsigned long rx_gso_checksum_fixup;
139 struct netfront_rx_info {
140 struct xen_netif_rx_response rx;
141 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
144 static void skb_entry_set_link(union skb_entry *list, unsigned short id)
149 static int skb_entry_is_link(const union skb_entry *list)
151 BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
152 return (unsigned long)list->skb < PAGE_OFFSET;
156 * Access macros for acquiring/freeing slots in tx_skbs[].
159 static void add_id_to_freelist(unsigned *head, union skb_entry *list,
162 skb_entry_set_link(&list[id], *head);
166 static unsigned short get_id_from_freelist(unsigned *head,
167 union skb_entry *list)
169 unsigned int id = *head;
170 *head = list[id].link;
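/*
 * Illustrative sketch (not part of the original driver): how the union
 * encoding above tells the two cases apart.  Ring slot indices are small
 * integers, so storing one via skb_entry_set_link() leaves a value well
 * below PAGE_OFFSET and skb_entry_is_link() reports true; storing a real
 * socket buffer pointer (a kernel virtual address >= PAGE_OFFSET), as
 * xennet_start_xmit() does with "np->tx_skbs[id].skb = skb", makes
 * skb_entry_is_link() report false, marking the slot as in flight.
 */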
174 static int xennet_rxidx(RING_IDX idx)
176 return idx & (NET_RX_RING_SIZE - 1);
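/*
 * Note (illustrative assumption): the mask above relies on
 * __CONST_RING_SIZE() yielding a power of two.  With 4096-byte pages the
 * rx ring holds 256 entries, so "idx & 255" folds the free-running
 * RING_IDX counter into a valid rx_skbs[]/grant_rx_ref[] slot.
 */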
179 static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
182 int i = xennet_rxidx(ri);
183 struct sk_buff *skb = np->rx_skbs[i];
184 np->rx_skbs[i] = NULL;
188 static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
191 int i = xennet_rxidx(ri);
192 grant_ref_t ref = np->grant_rx_ref[i];
193 np->grant_rx_ref[i] = GRANT_INVALID_REF;
198 static int xennet_sysfs_addif(struct net_device *netdev);
199 static void xennet_sysfs_delif(struct net_device *netdev);
200 #else /* !CONFIG_SYSFS */
201 #define xennet_sysfs_addif(dev) (0)
202 #define xennet_sysfs_delif(dev) do { } while (0)
205 static bool xennet_can_sg(struct net_device *dev)
207 return dev->features & NETIF_F_SG;
211 static void rx_refill_timeout(unsigned long data)
213 struct net_device *dev = (struct net_device *)data;
214 struct netfront_info *np = netdev_priv(dev);
215 napi_schedule(&np->napi);
218 static int netfront_tx_slot_available(struct netfront_info *np)
220 return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
221 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
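/*
 * Worked example (illustrative, assumed values): with TX_MAX_TARGET = 256
 * and MAX_SKB_FRAGS = 18 (typical for 4 KiB pages), the queue is reported
 * as having room only while fewer than 256 - 18 - 2 = 236 requests are
 * outstanding, leaving space for one more maximally fragmented skb.
 */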
224 static void xennet_maybe_wake_tx(struct net_device *dev)
226 struct netfront_info *np = netdev_priv(dev);
228 if (unlikely(netif_queue_stopped(dev)) &&
229 netfront_tx_slot_available(np) &&
230 likely(netif_running(dev)))
231 netif_wake_queue(dev);
234 static void xennet_alloc_rx_buffers(struct net_device *dev)
237 struct netfront_info *np = netdev_priv(dev);
240 int i, batch_target, notify;
241 RING_IDX req_prod = np->rx.req_prod_pvt;
245 struct xen_netif_rx_request *req;
247 if (unlikely(!netif_carrier_ok(dev)))
251 * Allocate skbuffs greedily, even though we batch updates to the
252 * receive ring. This creates a less bursty demand on the memory
253 * allocator, so should reduce the chance of failed allocation requests
254 * both for ourselves and for other kernel subsystems.
256 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
257 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
258 skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
259 GFP_ATOMIC | __GFP_NOWARN);
263 /* Align the IP header to a 16-byte boundary */
264 skb_reserve(skb, NET_IP_ALIGN);
266 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
270 /* Any skbuffs queued for refill? Force them out. */
273 /* Could not allocate any skbuffs. Try again later. */
274 mod_timer(&np->rx_refill_timer,
279 __skb_fill_page_desc(skb, 0, page, 0, 0);
280 skb_shinfo(skb)->nr_frags = 1;
281 __skb_queue_tail(&np->rx_batch, skb);
284 /* Is the batch large enough to be worthwhile? */
285 if (i < (np->rx_target/2)) {
286 if (req_prod > np->rx.sring->req_prod)
291 /* Adjust our fill target if we risked running out of buffers. */
292 if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
293 ((np->rx_target *= 2) > np->rx_max_target))
294 np->rx_target = np->rx_max_target;
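/*
 * Worked example (illustrative): with rx_target = 64 and
 * rx_max_target = 256, the target doubles to 128 whenever fewer than
 * 64 / 4 = 16 posted buffers are still awaiting responses, and is
 * clamped at 256; shrinking happens one slot at a time in xennet_poll().
 */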
298 skb = __skb_dequeue(&np->rx_batch);
304 id = xennet_rxidx(req_prod + i);
306 BUG_ON(np->rx_skbs[id]);
307 np->rx_skbs[id] = skb;
309 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
310 BUG_ON((signed short)ref < 0);
311 np->grant_rx_ref[id] = ref;
313 pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
314 vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
316 req = RING_GET_REQUEST(&np->rx, req_prod + i);
317 gnttab_grant_foreign_access_ref(ref,
318 np->xbdev->otherend_id,
326 wmb(); /* barrier so backend sees requests */
328 /* Above is a suitable barrier to ensure the backend will see requests. */
329 np->rx.req_prod_pvt = req_prod + i;
331 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
333 notify_remote_via_irq(np->netdev->irq);
336 static int xennet_open(struct net_device *dev)
338 struct netfront_info *np = netdev_priv(dev);
340 napi_enable(&np->napi);
342 spin_lock_bh(&np->rx_lock);
343 if (netif_carrier_ok(dev)) {
344 xennet_alloc_rx_buffers(dev);
345 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
346 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
347 napi_schedule(&np->napi);
349 spin_unlock_bh(&np->rx_lock);
351 netif_start_queue(dev);
356 static void xennet_tx_buf_gc(struct net_device *dev)
360 struct netfront_info *np = netdev_priv(dev);
363 BUG_ON(!netif_carrier_ok(dev));
366 prod = np->tx.sring->rsp_prod;
367 rmb(); /* Ensure we see responses up to 'prod'. */
369 for (cons = np->tx.rsp_cons; cons != prod; cons++) {
370 struct xen_netif_tx_response *txrsp;
372 txrsp = RING_GET_RESPONSE(&np->tx, cons);
373 if (txrsp->status == XEN_NETIF_RSP_NULL)
377 skb = np->tx_skbs[id].skb;
378 if (unlikely(gnttab_query_foreign_access(
379 np->grant_tx_ref[id]) != 0)) {
380 printk(KERN_ALERT "xennet_tx_buf_gc: warning "
381 "-- grant still in use by backend "
385 gnttab_end_foreign_access_ref(
386 np->grant_tx_ref[id], GNTMAP_readonly);
387 gnttab_release_grant_reference(
388 &np->gref_tx_head, np->grant_tx_ref[id]);
389 np->grant_tx_ref[id] = GRANT_INVALID_REF;
390 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
391 dev_kfree_skb_irq(skb);
394 np->tx.rsp_cons = prod;
397 * Set a new event, then check for race with update of tx_cons.
398 * Note that it is essential to schedule a callback, no matter
399 * how few buffers are pending. Even if there is space in the
400 * transmit ring, higher layers may be blocked because too much
401 * data is outstanding: in such cases notification from Xen is
402 * likely to be the only kick that we'll get.
404 np->tx.sring->rsp_event =
405 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
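/*
 * Worked example (illustrative): if prod == 10 and req_prod == 18,
 * rsp_event becomes 10 + (8 >> 1) + 1 = 15, so the backend raises the
 * next event after roughly half of the outstanding requests complete,
 * rather than interrupting us for every single response.
 */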
406 mb(); /* update shared area */
407 } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
409 xennet_maybe_wake_tx(dev);
412 static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
413 struct xen_netif_tx_request *tx)
415 struct netfront_info *np = netdev_priv(dev);
416 char *data = skb->data;
418 RING_IDX prod = np->tx.req_prod_pvt;
419 int frags = skb_shinfo(skb)->nr_frags;
420 unsigned int offset = offset_in_page(data);
421 unsigned int len = skb_headlen(skb);
426 /* While the header overlaps a page boundary (including being
427 larger than a page), split it into page-sized chunks. */
428 while (len > PAGE_SIZE - offset) {
429 tx->size = PAGE_SIZE - offset;
430 tx->flags |= XEN_NETTXF_more_data;
435 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
436 np->tx_skbs[id].skb = skb_get(skb);
437 tx = RING_GET_REQUEST(&np->tx, prod++);
439 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
440 BUG_ON((signed short)ref < 0);
442 mfn = virt_to_mfn(data);
443 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
444 mfn, GNTMAP_readonly);
446 tx->gref = np->grant_tx_ref[id] = ref;
452 /* Grant backend access to each skb fragment page. */
453 for (i = 0; i < frags; i++) {
454 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
456 tx->flags |= XEN_NETTXF_more_data;
458 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
459 np->tx_skbs[id].skb = skb_get(skb);
460 tx = RING_GET_REQUEST(&np->tx, prod++);
462 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
463 BUG_ON((signed short)ref < 0);
465 mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
466 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
467 mfn, GNTMAP_readonly);
469 tx->gref = np->grant_tx_ref[id] = ref;
470 tx->offset = frag->page_offset;
471 tx->size = skb_frag_size(frag);
475 np->tx.req_prod_pvt = prod;
478 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
481 struct netfront_info *np = netdev_priv(dev);
482 struct netfront_stats *stats = this_cpu_ptr(np->stats);
483 struct xen_netif_tx_request *tx;
484 struct xen_netif_extra_info *extra;
485 char *data = skb->data;
490 int frags = skb_shinfo(skb)->nr_frags;
491 unsigned int offset = offset_in_page(data);
492 unsigned int len = skb_headlen(skb);
494 frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
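/*
 * Worked example (illustrative): a linear header of len = 100 bytes that
 * starts at offset = 4060 within a 4096-byte page spans two pages, so
 * DIV_ROUND_UP(4160, 4096) = 2 extra slots are counted for the header in
 * addition to the nr_frags paged fragments.
 */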
495 if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
496 printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
502 spin_lock_irq(&np->tx_lock);
504 if (unlikely(!netif_carrier_ok(dev) ||
505 (frags > 1 && !xennet_can_sg(dev)) ||
506 netif_needs_gso(skb, netif_skb_features(skb)))) {
507 spin_unlock_irq(&np->tx_lock);
511 i = np->tx.req_prod_pvt;
513 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
514 np->tx_skbs[id].skb = skb;
516 tx = RING_GET_REQUEST(&np->tx, i);
519 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
520 BUG_ON((signed short)ref < 0);
521 mfn = virt_to_mfn(data);
522 gnttab_grant_foreign_access_ref(
523 ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
524 tx->gref = np->grant_tx_ref[id] = ref;
530 if (skb->ip_summed == CHECKSUM_PARTIAL)
532 tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
533 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
534 /* already checksummed by the remote end. */
535 tx->flags |= XEN_NETTXF_data_validated;
537 if (skb_shinfo(skb)->gso_size) {
538 struct xen_netif_extra_info *gso;
540 gso = (struct xen_netif_extra_info *)
541 RING_GET_REQUEST(&np->tx, ++i);
544 extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
546 tx->flags |= XEN_NETTXF_extra_info;
548 gso->u.gso.size = skb_shinfo(skb)->gso_size;
549 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
551 gso->u.gso.features = 0;
553 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
558 np->tx.req_prod_pvt = i + 1;
560 xennet_make_frags(skb, dev, tx);
563 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
565 notify_remote_via_irq(np->netdev->irq);
567 u64_stats_update_begin(&stats->syncp);
568 stats->tx_bytes += skb->len;
570 u64_stats_update_end(&stats->syncp);
572 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
573 xennet_tx_buf_gc(dev);
575 if (!netfront_tx_slot_available(np))
576 netif_stop_queue(dev);
578 spin_unlock_irq(&np->tx_lock);
583 dev->stats.tx_dropped++;
588 static int xennet_close(struct net_device *dev)
590 struct netfront_info *np = netdev_priv(dev);
591 netif_stop_queue(np->netdev);
592 napi_disable(&np->napi);
596 static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
599 int new = xennet_rxidx(np->rx.req_prod_pvt);
601 BUG_ON(np->rx_skbs[new]);
602 np->rx_skbs[new] = skb;
603 np->grant_rx_ref[new] = ref;
604 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
605 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
606 np->rx.req_prod_pvt++;
609 static int xennet_get_extras(struct netfront_info *np,
610 struct xen_netif_extra_info *extras,
614 struct xen_netif_extra_info *extra;
615 struct device *dev = &np->netdev->dev;
616 RING_IDX cons = np->rx.rsp_cons;
623 if (unlikely(cons + 1 == rp)) {
625 dev_warn(dev, "Missing extra info\n");
630 extra = (struct xen_netif_extra_info *)
631 RING_GET_RESPONSE(&np->rx, ++cons);
633 if (unlikely(!extra->type ||
634 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
636 dev_warn(dev, "Invalid extra type: %d\n",
640 memcpy(&extras[extra->type - 1], extra,
644 skb = xennet_get_rx_skb(np, cons);
645 ref = xennet_get_rx_ref(np, cons);
646 xennet_move_rx_slot(np, skb, ref);
647 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
649 np->rx.rsp_cons = cons;
653 static int xennet_get_responses(struct netfront_info *np,
654 struct netfront_rx_info *rinfo, RING_IDX rp,
655 struct sk_buff_head *list)
657 struct xen_netif_rx_response *rx = &rinfo->rx;
658 struct xen_netif_extra_info *extras = rinfo->extras;
659 struct device *dev = &np->netdev->dev;
660 RING_IDX cons = np->rx.rsp_cons;
661 struct sk_buff *skb = xennet_get_rx_skb(np, cons);
662 grant_ref_t ref = xennet_get_rx_ref(np, cons);
663 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
668 if (rx->flags & XEN_NETRXF_extra_info) {
669 err = xennet_get_extras(np, extras, rp);
670 cons = np->rx.rsp_cons;
674 if (unlikely(rx->status < 0 ||
675 rx->offset + rx->status > PAGE_SIZE)) {
677 dev_warn(dev, "rx->offset: %x, size: %u\n",
678 rx->offset, rx->status);
679 xennet_move_rx_slot(np, skb, ref);
685 * This definitely indicates a bug, either in this driver or in
686 * the backend driver. In the future this should flag the bad
687 * situation to the system controller to reboot the backend.
689 if (ref == GRANT_INVALID_REF) {
691 dev_warn(dev, "Bad rx response id %d.\n",
697 ret = gnttab_end_foreign_access_ref(ref, 0);
700 gnttab_release_grant_reference(&np->gref_rx_head, ref);
702 __skb_queue_tail(list, skb);
705 if (!(rx->flags & XEN_NETRXF_more_data))
708 if (cons + frags == rp) {
710 dev_warn(dev, "Need more frags\n");
715 rx = RING_GET_RESPONSE(&np->rx, cons + frags);
716 skb = xennet_get_rx_skb(np, cons + frags);
717 ref = xennet_get_rx_ref(np, cons + frags);
721 if (unlikely(frags > max)) {
723 dev_warn(dev, "Too many frags\n");
728 np->rx.rsp_cons = cons + frags;
733 static int xennet_set_skb_gso(struct sk_buff *skb,
734 struct xen_netif_extra_info *gso)
736 if (!gso->u.gso.size) {
738 printk(KERN_WARNING "GSO size must not be zero.\n");
742 /* Currently only TCPv4 segmentation offload is supported. */
743 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
745 printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
749 skb_shinfo(skb)->gso_size = gso->u.gso.size;
750 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
752 /* Header must be checked, and gso_segs computed. */
753 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
754 skb_shinfo(skb)->gso_segs = 0;
759 static RING_IDX xennet_fill_frags(struct netfront_info *np,
761 struct sk_buff_head *list)
763 struct skb_shared_info *shinfo = skb_shinfo(skb);
764 int nr_frags = shinfo->nr_frags;
765 RING_IDX cons = np->rx.rsp_cons;
766 struct sk_buff *nskb;
768 while ((nskb = __skb_dequeue(list))) {
769 struct xen_netif_rx_response *rx =
770 RING_GET_RESPONSE(&np->rx, ++cons);
771 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
773 __skb_fill_page_desc(skb, nr_frags,
774 skb_frag_page(nfrag),
775 rx->offset, rx->status);
777 skb->data_len += rx->status;
779 skb_shinfo(nskb)->nr_frags = 0;
785 shinfo->nr_frags = nr_frags;
789 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
794 int recalculate_partial_csum = 0;
797 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
798 * peers can fail to set NETRXF_csum_blank when sending a GSO
799 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
800 * recalculate the partial checksum.
802 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
803 struct netfront_info *np = netdev_priv(dev);
804 np->rx_gso_checksum_fixup++;
805 skb->ip_summed = CHECKSUM_PARTIAL;
806 recalculate_partial_csum = 1;
809 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
810 if (skb->ip_summed != CHECKSUM_PARTIAL)
813 if (skb->protocol != htons(ETH_P_IP))
816 iph = (void *)skb->data;
817 th = skb->data + 4 * iph->ihl;
818 if (th >= skb_tail_pointer(skb))
821 skb->csum_start = th - skb->head;
822 switch (iph->protocol) {
824 skb->csum_offset = offsetof(struct tcphdr, check);
826 if (recalculate_partial_csum) {
827 struct tcphdr *tcph = (struct tcphdr *)th;
828 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
829 skb->len - iph->ihl*4,
834 skb->csum_offset = offsetof(struct udphdr, check);
836 if (recalculate_partial_csum) {
837 struct udphdr *udph = (struct udphdr *)th;
838 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
839 skb->len - iph->ihl*4,
845 printk(KERN_ERR "Attempting to checksum a non-"
846 "TCP/UDP packet, dropping a protocol"
847 " %d packet\n", iph->protocol);
851 if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
860 static int handle_incoming_queue(struct net_device *dev,
861 struct sk_buff_head *rxq)
863 struct netfront_info *np = netdev_priv(dev);
864 struct netfront_stats *stats = this_cpu_ptr(np->stats);
865 int packets_dropped = 0;
868 while ((skb = __skb_dequeue(rxq)) != NULL) {
869 struct page *page = NETFRONT_SKB_CB(skb)->page;
870 void *vaddr = page_address(page);
871 unsigned offset = NETFRONT_SKB_CB(skb)->offset;
873 memcpy(skb->data, vaddr + offset,
876 if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
879 /* Ethernet work: Delayed to here as it peeks the header. */
880 skb->protocol = eth_type_trans(skb, dev);
882 if (checksum_setup(dev, skb)) {
885 dev->stats.rx_errors++;
889 u64_stats_update_begin(&stats->syncp);
891 stats->rx_bytes += skb->len;
892 u64_stats_update_end(&stats->syncp);
895 netif_receive_skb(skb);
898 return packets_dropped;
901 static int xennet_poll(struct napi_struct *napi, int budget)
903 struct netfront_info *np = container_of(napi, struct netfront_info, napi);
904 struct net_device *dev = np->netdev;
906 struct netfront_rx_info rinfo;
907 struct xen_netif_rx_response *rx = &rinfo.rx;
908 struct xen_netif_extra_info *extras = rinfo.extras;
911 struct sk_buff_head rxq;
912 struct sk_buff_head errq;
913 struct sk_buff_head tmpq;
918 spin_lock(&np->rx_lock);
920 skb_queue_head_init(&rxq);
921 skb_queue_head_init(&errq);
922 skb_queue_head_init(&tmpq);
924 rp = np->rx.sring->rsp_prod;
925 rmb(); /* Ensure we see queued responses up to 'rp'. */
929 while ((i != rp) && (work_done < budget)) {
930 memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
931 memset(extras, 0, sizeof(rinfo.extras));
933 err = xennet_get_responses(np, &rinfo, rp, &tmpq);
937 while ((skb = __skb_dequeue(&tmpq)))
938 __skb_queue_tail(&errq, skb);
939 dev->stats.rx_errors++;
944 skb = __skb_dequeue(&tmpq);
946 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
947 struct xen_netif_extra_info *gso;
948 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
950 if (unlikely(xennet_set_skb_gso(skb, gso))) {
951 __skb_queue_head(&tmpq, skb);
952 np->rx.rsp_cons += skb_queue_len(&tmpq);
957 NETFRONT_SKB_CB(skb)->page =
958 skb_frag_page(&skb_shinfo(skb)->frags[0]);
959 NETFRONT_SKB_CB(skb)->offset = rx->offset;
962 if (len > RX_COPY_THRESHOLD)
963 len = RX_COPY_THRESHOLD;
966 if (rx->status > len) {
967 skb_shinfo(skb)->frags[0].page_offset =
969 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
970 skb->data_len = rx->status - len;
972 __skb_fill_page_desc(skb, 0, NULL, 0, 0);
973 skb_shinfo(skb)->nr_frags = 0;
976 i = xennet_fill_frags(np, skb, &tmpq);
979 * Truesize approximates the size of true data plus
980 * any supervisor overheads. Adding hypervisor
981 * overheads has been shown to significantly reduce
982 * achievable bandwidth with the default receive
983 * buffer size. It is therefore not wise to account
986 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
987 * to RX_COPY_THRESHOLD + the supervisor
988 * overheads. Here, we add the size of the data pulled
989 * in xennet_fill_frags().
991 * We also adjust for any unused space in the main
992 * data area by subtracting (RX_COPY_THRESHOLD -
993 * len). This is especially important with drivers
994 * which split incoming packets into header and data,
995 * using only 66 bytes of the main data area (see the
996 * e1000 driver for example.) On such systems,
997 * without this last adjustment, our achievable
998 * receive throughput using the standard receive
999 * buffer size was cut by 25%(!!!).
1001 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
1002 skb->len += skb->data_len;
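/*
 * Worked example (illustrative): with RX_COPY_THRESHOLD = 256, a backend
 * that places only a 66-byte header in the first slot (len = 66) and
 * 1434 bytes in fragments gives truesize += 1434 - (256 - 66) = 1244,
 * crediting back the 190 bytes of unused linear area.
 */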
1004 if (rx->flags & XEN_NETRXF_csum_blank)
1005 skb->ip_summed = CHECKSUM_PARTIAL;
1006 else if (rx->flags & XEN_NETRXF_data_validated)
1007 skb->ip_summed = CHECKSUM_UNNECESSARY;
1009 __skb_queue_tail(&rxq, skb);
1011 np->rx.rsp_cons = ++i;
1015 __skb_queue_purge(&errq);
1017 work_done -= handle_incoming_queue(dev, &rxq);
1019 /* If we get a callback with very few responses, reduce fill target. */
1020 /* NB. Note exponential increase, linear decrease. */
1021 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1022 ((3*np->rx_target) / 4)) &&
1023 (--np->rx_target < np->rx_min_target))
1024 np->rx_target = np->rx_min_target;
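/*
 * Worked example (illustrative): with rx_target = 64, the target is
 * decremented only when more than 3 * 64 / 4 = 48 posted buffers remain
 * unconsumed after this poll, and it never drops below rx_min_target.
 */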
1026 xennet_alloc_rx_buffers(dev);
1028 if (work_done < budget) {
1031 local_irq_save(flags);
1033 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1035 __napi_complete(napi);
1037 local_irq_restore(flags);
1040 spin_unlock(&np->rx_lock);
1045 static int xennet_change_mtu(struct net_device *dev, int mtu)
1047 int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
1055 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1056 struct rtnl_link_stats64 *tot)
1058 struct netfront_info *np = netdev_priv(dev);
1061 for_each_possible_cpu(cpu) {
1062 struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
1063 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1067 start = u64_stats_fetch_begin_bh(&stats->syncp);
1069 rx_packets = stats->rx_packets;
1070 tx_packets = stats->tx_packets;
1071 rx_bytes = stats->rx_bytes;
1072 tx_bytes = stats->tx_bytes;
1073 } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
1075 tot->rx_packets += rx_packets;
1076 tot->tx_packets += tx_packets;
1077 tot->rx_bytes += rx_bytes;
1078 tot->tx_bytes += tx_bytes;
1081 tot->rx_errors = dev->stats.rx_errors;
1082 tot->tx_dropped = dev->stats.tx_dropped;
1087 static void xennet_release_tx_bufs(struct netfront_info *np)
1089 struct sk_buff *skb;
1092 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1093 /* Skip over entries which are actually freelist references */
1094 if (skb_entry_is_link(&np->tx_skbs[i]))
1097 skb = np->tx_skbs[i].skb;
1098 gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
1100 gnttab_release_grant_reference(&np->gref_tx_head,
1101 np->grant_tx_ref[i]);
1102 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1103 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
1104 dev_kfree_skb_irq(skb);
1108 static void xennet_release_rx_bufs(struct netfront_info *np)
1110 struct mmu_update *mmu = np->rx_mmu;
1111 struct multicall_entry *mcl = np->rx_mcl;
1112 struct sk_buff_head free_list;
1113 struct sk_buff *skb;
1115 int xfer = 0, noxfer = 0, unused = 0;
1118 dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
1122 skb_queue_head_init(&free_list);
1124 spin_lock_bh(&np->rx_lock);
1126 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1127 ref = np->grant_rx_ref[id];
1128 if (ref == GRANT_INVALID_REF) {
1133 skb = np->rx_skbs[id];
1134 mfn = gnttab_end_foreign_transfer_ref(ref);
1135 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1136 np->grant_rx_ref[id] = GRANT_INVALID_REF;
1139 skb_shinfo(skb)->nr_frags = 0;
1145 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1146 /* Remap the page. */
1147 const struct page *page =
1148 skb_frag_page(&skb_shinfo(skb)->frags[0]);
1149 unsigned long pfn = page_to_pfn(page);
1150 void *vaddr = page_address(page);
1152 MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
1153 mfn_pte(mfn, PAGE_KERNEL),
1156 mmu->ptr = ((u64)mfn << PAGE_SHIFT)
1157 | MMU_MACHPHYS_UPDATE;
1161 set_phys_to_machine(pfn, mfn);
1163 __skb_queue_tail(&free_list, skb);
1167 dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
1168 __func__, xfer, noxfer, unused);
1171 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1172 /* Do all the remapping work and M2P updates. */
1173 MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
1176 HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
1180 __skb_queue_purge(&free_list);
1182 spin_unlock_bh(&np->rx_lock);
1185 static void xennet_uninit(struct net_device *dev)
1187 struct netfront_info *np = netdev_priv(dev);
1188 xennet_release_tx_bufs(np);
1189 xennet_release_rx_bufs(np);
1190 gnttab_free_grant_references(np->gref_tx_head);
1191 gnttab_free_grant_references(np->gref_rx_head);
1194 static netdev_features_t xennet_fix_features(struct net_device *dev,
1195 netdev_features_t features)
1197 struct netfront_info *np = netdev_priv(dev);
1200 if (features & NETIF_F_SG) {
1201 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1206 features &= ~NETIF_F_SG;
1209 if (features & NETIF_F_TSO) {
1210 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1211 "feature-gso-tcpv4", "%d", &val) < 0)
1215 features &= ~NETIF_F_TSO;
1221 static int xennet_set_features(struct net_device *dev,
1222 netdev_features_t features)
1224 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1225 netdev_info(dev, "Reducing MTU because no SG offload\n");
1226 dev->mtu = ETH_DATA_LEN;
1232 static const struct net_device_ops xennet_netdev_ops = {
1233 .ndo_open = xennet_open,
1234 .ndo_uninit = xennet_uninit,
1235 .ndo_stop = xennet_close,
1236 .ndo_start_xmit = xennet_start_xmit,
1237 .ndo_change_mtu = xennet_change_mtu,
1238 .ndo_get_stats64 = xennet_get_stats64,
1239 .ndo_set_mac_address = eth_mac_addr,
1240 .ndo_validate_addr = eth_validate_addr,
1241 .ndo_fix_features = xennet_fix_features,
1242 .ndo_set_features = xennet_set_features,
1245 static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
1248 struct net_device *netdev;
1249 struct netfront_info *np;
1251 netdev = alloc_etherdev(sizeof(struct netfront_info));
1253 printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
1255 return ERR_PTR(-ENOMEM);
1258 np = netdev_priv(netdev);
1261 spin_lock_init(&np->tx_lock);
1262 spin_lock_init(&np->rx_lock);
1264 skb_queue_head_init(&np->rx_batch);
1265 np->rx_target = RX_DFL_MIN_TARGET;
1266 np->rx_min_target = RX_DFL_MIN_TARGET;
1267 np->rx_max_target = RX_MAX_TARGET;
1269 init_timer(&np->rx_refill_timer);
1270 np->rx_refill_timer.data = (unsigned long)netdev;
1271 np->rx_refill_timer.function = rx_refill_timeout;
1274 np->stats = alloc_percpu(struct netfront_stats);
1275 if (np->stats == NULL)
1278 /* Initialise tx_skbs as a free chain containing every entry. */
1279 np->tx_skb_freelist = 0;
1280 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1281 skb_entry_set_link(&np->tx_skbs[i], i+1);
1282 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1285 /* Clear out rx_skbs */
1286 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1287 np->rx_skbs[i] = NULL;
1288 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1291 /* A grant for every tx ring slot */
1292 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1293 &np->gref_tx_head) < 0) {
1294 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
1296 goto exit_free_stats;
1298 /* A grant for every rx ring slot */
1299 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1300 &np->gref_rx_head) < 0) {
1301 printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
1306 netdev->netdev_ops = &xennet_netdev_ops;
1308 netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1309 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1311 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
1314 * Assume that all hw features are available for now. This set
1315 * will be adjusted by the call to netdev_update_features() in
1316 * xennet_connect() which is the earliest point where we can
1317 * negotiate with the backend regarding supported features.
1319 netdev->features |= netdev->hw_features;
1321 SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1322 SET_NETDEV_DEV(netdev, &dev->dev);
1324 np->netdev = netdev;
1326 netif_carrier_off(netdev);
1331 gnttab_free_grant_references(np->gref_tx_head);
1333 free_percpu(np->stats);
1335 free_netdev(netdev);
1336 return ERR_PTR(err);
1340 * Entry point to this code when a new device is created. Allocate the basic
1341 * structures and the ring buffers for communication with the backend, and
1342 * inform the backend of the appropriate details for those.
1344 static int __devinit netfront_probe(struct xenbus_device *dev,
1345 const struct xenbus_device_id *id)
1348 struct net_device *netdev;
1349 struct netfront_info *info;
1351 netdev = xennet_create_dev(dev);
1352 if (IS_ERR(netdev)) {
1353 err = PTR_ERR(netdev);
1354 xenbus_dev_fatal(dev, err, "creating netdev");
1358 info = netdev_priv(netdev);
1359 dev_set_drvdata(&dev->dev, info);
1361 err = register_netdev(info->netdev);
1363 printk(KERN_WARNING "%s: register_netdev err=%d\n",
1368 err = xennet_sysfs_addif(info->netdev);
1370 unregister_netdev(info->netdev);
1371 printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
1379 free_netdev(netdev);
1380 dev_set_drvdata(&dev->dev, NULL);
1384 static void xennet_end_access(int ref, void *page)
1386 /* This frees the page as a side-effect */
1387 if (ref != GRANT_INVALID_REF)
1388 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1391 static void xennet_disconnect_backend(struct netfront_info *info)
1393 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1394 spin_lock_bh(&info->rx_lock);
1395 spin_lock_irq(&info->tx_lock);
1396 netif_carrier_off(info->netdev);
1397 spin_unlock_irq(&info->tx_lock);
1398 spin_unlock_bh(&info->rx_lock);
1400 if (info->netdev->irq)
1401 unbind_from_irqhandler(info->netdev->irq, info->netdev);
1402 info->evtchn = info->netdev->irq = 0;
1404 /* End access and free the pages */
1405 xennet_end_access(info->tx_ring_ref, info->tx.sring);
1406 xennet_end_access(info->rx_ring_ref, info->rx.sring);
1408 info->tx_ring_ref = GRANT_INVALID_REF;
1409 info->rx_ring_ref = GRANT_INVALID_REF;
1410 info->tx.sring = NULL;
1411 info->rx.sring = NULL;
1415 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1416 * driver restart. We tear down our netif structure and recreate it, but
1417 * leave the device-layer structures intact so that this is transparent to the
1418 * rest of the kernel.
1420 static int netfront_resume(struct xenbus_device *dev)
1422 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1424 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1426 xennet_disconnect_backend(info);
1430 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1432 char *s, *e, *macstr;
1435 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1437 return PTR_ERR(macstr);
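/*
 * Illustrative note: the xenstore "mac" node is expected in the usual
 * colon-separated form, e.g. "aa:bb:cc:dd:ee:ff"; the loop below parses
 * one hex octet per iteration and insists on a ':' separator, or a
 * terminating '\0' after the final octet.
 */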
1439 for (i = 0; i < ETH_ALEN; i++) {
1440 mac[i] = simple_strtoul(s, &e, 16);
1441 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1452 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1454 struct net_device *dev = dev_id;
1455 struct netfront_info *np = netdev_priv(dev);
1456 unsigned long flags;
1458 spin_lock_irqsave(&np->tx_lock, flags);
1460 if (likely(netif_carrier_ok(dev))) {
1461 xennet_tx_buf_gc(dev);
1462 /* Under tx_lock: protects access to rx shared-ring indexes. */
1463 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
1464 napi_schedule(&np->napi);
1467 spin_unlock_irqrestore(&np->tx_lock, flags);
1472 static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1474 struct xen_netif_tx_sring *txs;
1475 struct xen_netif_rx_sring *rxs;
1477 struct net_device *netdev = info->netdev;
1479 info->tx_ring_ref = GRANT_INVALID_REF;
1480 info->rx_ring_ref = GRANT_INVALID_REF;
1481 info->rx.sring = NULL;
1482 info->tx.sring = NULL;
1485 err = xen_net_read_mac(dev, netdev->dev_addr);
1487 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1491 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1494 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1497 SHARED_RING_INIT(txs);
1498 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1500 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1502 free_page((unsigned long)txs);
1506 info->tx_ring_ref = err;
1507 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1510 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1513 SHARED_RING_INIT(rxs);
1514 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1516 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1518 free_page((unsigned long)rxs);
1521 info->rx_ring_ref = err;
1523 err = xenbus_alloc_evtchn(dev, &info->evtchn);
1527 err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
1528 0, netdev->name, netdev);
1538 /* Common code used when first setting up, and when resuming. */
1539 static int talk_to_netback(struct xenbus_device *dev,
1540 struct netfront_info *info)
1542 const char *message;
1543 struct xenbus_transaction xbt;
1546 /* Create shared ring, alloc event channel. */
1547 err = setup_netfront(dev, info);
1552 err = xenbus_transaction_start(&xbt);
1554 xenbus_dev_fatal(dev, err, "starting transaction");
1558 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1561 message = "writing tx ring-ref";
1562 goto abort_transaction;
1564 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1567 message = "writing rx ring-ref";
1568 goto abort_transaction;
1570 err = xenbus_printf(xbt, dev->nodename,
1571 "event-channel", "%u", info->evtchn);
1573 message = "writing event-channel";
1574 goto abort_transaction;
1577 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1580 message = "writing request-rx-copy";
1581 goto abort_transaction;
1584 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1586 message = "writing feature-rx-notify";
1587 goto abort_transaction;
1590 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1592 message = "writing feature-sg";
1593 goto abort_transaction;
1596 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1598 message = "writing feature-gso-tcpv4";
1599 goto abort_transaction;
1602 err = xenbus_transaction_end(xbt, 0);
1606 xenbus_dev_fatal(dev, err, "completing transaction");
1613 xenbus_transaction_end(xbt, 1);
1614 xenbus_dev_fatal(dev, err, "%s", message);
1616 xennet_disconnect_backend(info);
1621 static int xennet_connect(struct net_device *dev)
1623 struct netfront_info *np = netdev_priv(dev);
1624 int i, requeue_idx, err;
1625 struct sk_buff *skb;
1627 struct xen_netif_rx_request *req;
1628 unsigned int feature_rx_copy;
1630 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1631 "feature-rx-copy", "%u", &feature_rx_copy);
1633 feature_rx_copy = 0;
1635 if (!feature_rx_copy) {
1637 "backend does not support copying receive path\n");
1641 err = talk_to_netback(np->xbdev, np);
1646 netdev_update_features(dev);
1649 spin_lock_bh(&np->rx_lock);
1650 spin_lock_irq(&np->tx_lock);
1652 /* Step 1: Discard all pending TX packet fragments. */
1653 xennet_release_tx_bufs(np);
1655 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1656 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1658 const struct page *page;
1659 if (!np->rx_skbs[i])
1662 skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1663 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1664 req = RING_GET_REQUEST(&np->rx, requeue_idx);
1666 frag = &skb_shinfo(skb)->frags[0];
1667 page = skb_frag_page(frag);
1668 gnttab_grant_foreign_access_ref(
1669 ref, np->xbdev->otherend_id,
1670 pfn_to_mfn(page_to_pfn(page)),
1673 req->id = requeue_idx;
1678 np->rx.req_prod_pvt = requeue_idx;
1681 * Step 3: All public and private state should now be sane. Get
1682 * ready to start sending and receiving packets and give the driver
1683 * domain a kick because we've probably just requeued some
1686 netif_carrier_on(np->netdev);
1687 notify_remote_via_irq(np->netdev->irq);
1688 xennet_tx_buf_gc(dev);
1689 xennet_alloc_rx_buffers(dev);
1691 spin_unlock_irq(&np->tx_lock);
1692 spin_unlock_bh(&np->rx_lock);
1698 * Callback received when the backend's state changes.
1700 static void netback_changed(struct xenbus_device *dev,
1701 enum xenbus_state backend_state)
1703 struct netfront_info *np = dev_get_drvdata(&dev->dev);
1704 struct net_device *netdev = np->netdev;
1706 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1708 switch (backend_state) {
1709 case XenbusStateInitialising:
1710 case XenbusStateInitialised:
1711 case XenbusStateReconfiguring:
1712 case XenbusStateReconfigured:
1713 case XenbusStateUnknown:
1714 case XenbusStateClosed:
1717 case XenbusStateInitWait:
1718 if (dev->state != XenbusStateInitialising)
1720 if (xennet_connect(netdev) != 0)
1722 xenbus_switch_state(dev, XenbusStateConnected);
1725 case XenbusStateConnected:
1726 netif_notify_peers(netdev);
1729 case XenbusStateClosing:
1730 xenbus_frontend_closed(dev);
1735 static const struct xennet_stat {
1736 char name[ETH_GSTRING_LEN];
1738 } xennet_stats[] = {
1740 "rx_gso_checksum_fixup",
1741 offsetof(struct netfront_info, rx_gso_checksum_fixup)
1745 static int xennet_get_sset_count(struct net_device *dev, int string_set)
1747 switch (string_set) {
1749 return ARRAY_SIZE(xennet_stats);
1755 static void xennet_get_ethtool_stats(struct net_device *dev,
1756 struct ethtool_stats *stats, u64 * data)
1758 void *np = netdev_priv(dev);
1761 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1762 data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
1765 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1769 switch (stringset) {
1771 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1772 memcpy(data + i * ETH_GSTRING_LEN,
1773 xennet_stats[i].name, ETH_GSTRING_LEN);
1778 static const struct ethtool_ops xennet_ethtool_ops =
1780 .get_link = ethtool_op_get_link,
1782 .get_sset_count = xennet_get_sset_count,
1783 .get_ethtool_stats = xennet_get_ethtool_stats,
1784 .get_strings = xennet_get_strings,
1788 static ssize_t show_rxbuf_min(struct device *dev,
1789 struct device_attribute *attr, char *buf)
1791 struct net_device *netdev = to_net_dev(dev);
1792 struct netfront_info *info = netdev_priv(netdev);
1794 return sprintf(buf, "%u\n", info->rx_min_target);
1797 static ssize_t store_rxbuf_min(struct device *dev,
1798 struct device_attribute *attr,
1799 const char *buf, size_t len)
1801 struct net_device *netdev = to_net_dev(dev);
1802 struct netfront_info *np = netdev_priv(netdev);
1804 unsigned long target;
1806 if (!capable(CAP_NET_ADMIN))
1809 target = simple_strtoul(buf, &endp, 0);
1813 if (target < RX_MIN_TARGET)
1814 target = RX_MIN_TARGET;
1815 if (target > RX_MAX_TARGET)
1816 target = RX_MAX_TARGET;
1818 spin_lock_bh(&np->rx_lock);
1819 if (target > np->rx_max_target)
1820 np->rx_max_target = target;
1821 np->rx_min_target = target;
1822 if (target > np->rx_target)
1823 np->rx_target = target;
1825 xennet_alloc_rx_buffers(netdev);
1827 spin_unlock_bh(&np->rx_lock);
1831 static ssize_t show_rxbuf_max(struct device *dev,
1832 struct device_attribute *attr, char *buf)
1834 struct net_device *netdev = to_net_dev(dev);
1835 struct netfront_info *info = netdev_priv(netdev);
1837 return sprintf(buf, "%u\n", info->rx_max_target);
1840 static ssize_t store_rxbuf_max(struct device *dev,
1841 struct device_attribute *attr,
1842 const char *buf, size_t len)
1844 struct net_device *netdev = to_net_dev(dev);
1845 struct netfront_info *np = netdev_priv(netdev);
1847 unsigned long target;
1849 if (!capable(CAP_NET_ADMIN))
1852 target = simple_strtoul(buf, &endp, 0);
1856 if (target < RX_MIN_TARGET)
1857 target = RX_MIN_TARGET;
1858 if (target > RX_MAX_TARGET)
1859 target = RX_MAX_TARGET;
1861 spin_lock_bh(&np->rx_lock);
1862 if (target < np->rx_min_target)
1863 np->rx_min_target = target;
1864 np->rx_max_target = target;
1865 if (target < np->rx_target)
1866 np->rx_target = target;
1868 xennet_alloc_rx_buffers(netdev);
1870 spin_unlock_bh(&np->rx_lock);
1874 static ssize_t show_rxbuf_cur(struct device *dev,
1875 struct device_attribute *attr, char *buf)
1877 struct net_device *netdev = to_net_dev(dev);
1878 struct netfront_info *info = netdev_priv(netdev);
1880 return sprintf(buf, "%u\n", info->rx_target);
1883 static struct device_attribute xennet_attrs[] = {
1884 __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
1885 __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
1886 __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
1889 static int xennet_sysfs_addif(struct net_device *netdev)
1894 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
1895 err = device_create_file(&netdev->dev,
1904 device_remove_file(&netdev->dev, &xennet_attrs[i]);
1908 static void xennet_sysfs_delif(struct net_device *netdev)
1912 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
1913 device_remove_file(&netdev->dev, &xennet_attrs[i]);
1916 #endif /* CONFIG_SYSFS */
1918 static const struct xenbus_device_id netfront_ids[] = {
1924 static int __devexit xennet_remove(struct xenbus_device *dev)
1926 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1928 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1930 unregister_netdev(info->netdev);
1932 xennet_disconnect_backend(info);
1934 del_timer_sync(&info->rx_refill_timer);
1936 xennet_sysfs_delif(info->netdev);
1938 free_percpu(info->stats);
1940 free_netdev(info->netdev);
1945 static DEFINE_XENBUS_DRIVER(netfront, ,
1946 .probe = netfront_probe,
1947 .remove = __devexit_p(xennet_remove),
1948 .resume = netfront_resume,
1949 .otherend_changed = netback_changed,
1952 static int __init netif_init(void)
1957 if (xen_initial_domain())
1960 if (xen_hvm_domain() && !xen_platform_pci_unplug)
1963 printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
1965 return xenbus_register_frontend(&netfront_driver);
1967 module_init(netif_init);
1970 static void __exit netif_exit(void)
1972 if (xen_initial_domain())
1975 xenbus_unregister_driver(&netfront_driver);
1977 module_exit(netif_exit);
1979 MODULE_DESCRIPTION("Xen virtual network device frontend");
1980 MODULE_LICENSE("GPL");
1981 MODULE_ALIAS("xen:vif");
1982 MODULE_ALIAS("xennet");